/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>        /* To get host page size per arch */
#include <linux/aer.h>

#include "mpt3sas_base.h"

static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];

#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

/* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH     30000
#define MAX_CHAIN_DEPTH         100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0444);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors, " max msix vectors");

static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
        "irq poll weight (default= one fourth of HBA queue depth)");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
        " enable detection of firmware fault and halt firmware - (default=0)");

static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
        "Performance mode (only for Aero/Sea Generation), options:\n\t\t"
        "0 - balanced: high iops mode is enabled &\n\t\t"
        "interrupt coalescing is enabled only on high iops queues,\n\t\t"
        "1 - iops: high iops mode is disabled &\n\t\t"
        "interrupt coalescing is enabled on all queues,\n\t\t"
        "2 - latency: high iops mode is disabled &\n\t\t"
        "interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
        "\t\tdefault - default perf_mode is 'balanced'"
        );

enum mpt3sas_perf_mode {
        MPT_PERF_MODE_DEFAULT   = -1,
        MPT_PERF_MODE_BALANCED  = 0,
        MPT_PERF_MODE_IOPS      = 1,
        MPT_PERF_MODE_LATENCY   = 2,
};
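
/*
 * Usage note (illustrative, not from the original source): the parameters
 * above are registered with mode 0444, so they are read-only through sysfs
 * and are normally supplied at module load time, e.g.:
 *
 *      modprobe mpt3sas max_queue_depth=1024 perf_mode=2
 *
 * The values shown are hypothetical examples; -1 leaves a setting at the
 * driver-chosen default.
 */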

static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
        u32 ioc_state, int timeout);
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);

/**
 * mpt3sas_base_check_cmd_timeout - check whether a timed-out command
 *      was terminated due to Host reset.
 *
 * @ioc: per adapter object.
 * @status: status of issued command.
 * @mpi_request: mf request pointer.
 * @sz: size of buffer.
 *
 * Return: 1/0 - a reset needs to be issued or not.
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
        u8 status, void *mpi_request, int sz)
{
        u8 issue_reset = 0;

        if (!(status & MPT3_CMD_RESET))
                issue_reset = 1;

        ioc_err(ioc, "Command %s\n",
                issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
        _debug_dump_mf(mpi_request, sz);

        return issue_reset;
}
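
/*
 * Illustrative caller pattern (a sketch, assuming the calling convention
 * used elsewhere in this driver, not lifted verbatim): internal-command
 * issuers typically call the helper above after their completion wait
 * expires, passing the command status and the request frame size:
 *
 *      if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
 *              issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
 *                  ioc->base_cmds.status, mpi_request,
 *                  sizeof(Mpi2SasIoUnitControlRequest_t)/4);
 *              if (issue_reset)
 *                      mpt3sas_base_hard_reset_handler(ioc,
 *                          FORCE_BIG_HAMMER);
 *      }
 */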

/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: ?
 * @kp: ?
 *
 * Return: ?
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
        int ret = param_set_int(val, kp);
        struct MPT3SAS_ADAPTER *ioc;

        if (ret)
                return ret;

        /* global ioc spinlock to protect controller list on list operations */
        pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
        spin_lock(&gioc_lock);
        list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
                ioc->fwfault_debug = mpt3sas_fwfault_debug;
        spin_unlock(&gioc_lock);
        return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
        param_get_int, &mpt3sas_fwfault_debug, 0644);
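
/*
 * Note: unlike the 0444 parameters above, this one is registered with mode
 * 0644 and a custom setter, so writing
 *
 *      echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 *
 * at runtime updates fwfault_debug on every adapter in mpt3sas_ioc_list.
 */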

/**
 * _base_readl_aero - retry readl for max three times.
 * @addr: MPT Fusion system interface register address
 *
 * Retry the readl() for max three times if it gets zero value
 * while reading the system interface register.
 */
static inline u32
_base_readl_aero(const volatile void __iomem *addr)
{
        u32 i = 0, ret_val;

        do {
                ret_val = readl(addr);
                i++;
        } while (ret_val == 0 && i < 3);

        return ret_val;
}

static inline u32
_base_readl(const volatile void __iomem *addr)
{
        return readl(addr);
}
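
/*
 * Note (a sketch of how these two helpers are expected to be wired up at
 * init time; the assignment itself lives outside this excerpt):
 *
 *      if (ioc->is_aero_ioc)
 *              ioc->base_readl = &_base_readl_aero;
 *      else
 *              ioc->base_readl = &_base_readl;
 *
 * Only the Aero/Sea variant needs the retry loop, to tolerate spurious
 * zero reads of the system interface registers.
 */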

/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *      in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame(lower 32bit addr)
 * @index: System request message index.
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
        u32 index)
{
        /*
         * MPI frames start at offset 256 within the system register
         * space. Max MPI frame supported is 32, so 32 * 128 = 4K. From
         * here, the clone of the reply free pool for the mCPU starts.
         */
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
                        MPI_FRAME_START_OFFSET +
                        (cmd_credit * ioc->request_sz) + (index * sizeof(u32));

        writel(reply, reply_free_iomem);
}

/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *      to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
        int i;
        u32 *src_virt_mem = (u32 *)src;

        for (i = 0; i < size/4; i++)
                writel((u32)src_virt_mem[i],
                    (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
        int i;
        u32 *src_virt_mem = (u32 *)(src);

        for (i = 0; i < size/4; i++)
                writel((u32)src_virt_mem[i],
                        (void __iomem *)dst_iomem + (i * 4));
}
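
/*
 * Note: _base_clone_mpi_to_sys_mem() and _base_clone_to_sys_mem() perform
 * the same dword-by-dword writel() copy; they differ only in whether the
 * destination parameter carries the __iomem annotation.
 */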

/**
 * _base_get_chain - Calculates and Returns virtual chain address
 *      for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: the chain address.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        u8 sge_chain_count)
{
        void __iomem *base_chain, *chain_virt;
        u16 cmd_credit = ioc->facts.RequestCredit + 1;

        base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
                (cmd_credit * ioc->request_sz) +
                REPLY_FREE_POOL_SIZE;
        chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
                        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
        return chain_virt;
}
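
/*
 * Worked example (using the BAR0 layout described in _clone_sg_entries()
 * below): with cmd_credit = 32, request_sz = 128 and REPLY_FREE_POOL_SIZE
 * = 512, base_chain lands at offset 256 + (32 * 128) + 512 = 4864, i.e.
 * the start of the "4864 - 17152 SGE chain element" window.
 */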

/**
 * _base_get_chain_phys - Calculates and Returns physical address
 *      in BAR0 for scatter gather chains, for
 *      the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: Physical chain address.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        u8 sge_chain_count)
{
        phys_addr_t base_chain_phys, chain_phys;
        u16 cmd_credit = ioc->facts.RequestCredit + 1;

        base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
                (cmd_credit * ioc->request_sz) +
                REPLY_FREE_POOL_SIZE;
        chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
                        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
        return chain_phys;
}

/**
 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
 *      buffer address for the provided smid.
 *      (Each smid can have 64K, starting from offset 17024.)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */
static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        /* Added extra 1 to reach end of chain */
        void __iomem *chain_end = _base_get_chain(ioc,
                        cmd_credit + 1,
                        ioc->facts.MaxChainDepth);
        return chain_end + (smid * 64 * 1024);
}

/**
 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
 *      Host buffer Physical address for the provided smid.
 *      (Each smid can have 64K, starting from offset 17024.)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
                        cmd_credit + 1,
                        ioc->facts.MaxChainDepth);
        return chain_end_phys + (smid * 64 * 1024);
}

/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates the chain
 *      lookup list and provides the chain_buffer address
 *      for the matching dma address.
 *      (Each smid can have 64K, starting from offset 17024.)
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * Return: Pointer to chain buffer, or NULL on failure.
 */
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
        dma_addr_t chain_buffer_dma)
{
        u16 index, j;
        struct chain_tracker *ct;

        for (index = 0; index < ioc->scsiio_depth; index++) {
                for (j = 0; j < ioc->chains_needed_per_io; j++) {
                        ct = &ioc->chain_lookup[index].chains_per_smid[j];
                        if (ct && ct->chain_buffer_dma == chain_buffer_dma)
                                return ct->chain_buffer;
                }
        }
        ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
        return NULL;
}

/**
 * _clone_sg_entries - MPI EP's scsiio and config requests
 *      are handled here. Base function for
 *      double buffering, before submitting
 *      the requests.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
        void *mpi_request, u16 smid)
{
        Mpi2SGESimple32_t *sgel, *sgel_next;
        u32 sgl_flags, sge_chain_count = 0;
        bool is_write = 0;
        u16 i = 0;
        void __iomem *buffer_iomem;
        phys_addr_t buffer_iomem_phys;
        void __iomem *buff_ptr;
        phys_addr_t buff_ptr_phys;
        void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
        void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
        phys_addr_t dst_addr_phys;
        MPI2RequestHeader_t *request_hdr;
        struct scsi_cmnd *scmd;
        struct scatterlist *sg_scmd = NULL;
        int is_scsiio_req = 0;

        request_hdr = (MPI2RequestHeader_t *) mpi_request;

        if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
                Mpi25SCSIIORequest_t *scsiio_request =
                        (Mpi25SCSIIORequest_t *)mpi_request;
                sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
                is_scsiio_req = 1;
        } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
                Mpi2ConfigRequest_t *config_req =
                        (Mpi2ConfigRequest_t *)mpi_request;
                sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
        } else
                return;

        /* From smid we can get scsi_cmd; once we have sg_scmd,
         * we just need to get sg_virt and sg_next to get the virtual
         * address associated with sgel->Address.
         */

        if (is_scsiio_req) {
                /* Get scsi_cmd using smid */
                scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
                if (scmd == NULL) {
                        ioc_err(ioc, "scmd is NULL\n");
                        return;
                }

                /* Get sg_scmd from scmd provided */
                sg_scmd = scsi_sglist(scmd);
        }

        /*
         * 0 - 255      System register
         * 256 - 4352   MPI Frame. (This is based on maxCredit 32)
         * 4352 - 4864  Reply_free pool (512 byte is reserved
         *              considering maxCredit 32. Reply need extra
         *              room, for mCPU case kept four times of
         *              maxCredit).
         * 4864 - 17152 SGE chain element. (32cmd * 3 chain of
         *              128 byte size = 12288)
         * 17152 - x    Host buffer mapped with smid.
         *              (Each smid can have 64K Max IO.)
         * BAR0+Last 1K MSIX Addr and Data
         * Total size in use 2113664 bytes of 4MB BAR0
         */

        buffer_iomem = _base_get_buffer_bar0(ioc, smid);
        buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

        buff_ptr = buffer_iomem;
        buff_ptr_phys = buffer_iomem_phys;
        WARN_ON(buff_ptr_phys > U32_MAX);

        if (le32_to_cpu(sgel->FlagsLength) &
                        (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
                is_write = 1;

        for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

                sgl_flags =
                    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

                switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
                case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
                        /*
                         * Helper function which, when passed
                         * chain_buffer_dma, returns chain_buffer. Get
                         * the virtual address for sgel->Address.
                         */
                        sgel_next =
                                _base_get_chain_buffer_dma_to_chain_buffer(ioc,
                                                le32_to_cpu(sgel->Address));
                        if (sgel_next == NULL)
                                return;
                        /*
                         * This is copying a 128 byte chain
                         * frame (not a host buffer)
                         */
                        dst_chain_addr[sge_chain_count] =
                                _base_get_chain(ioc,
                                        smid, sge_chain_count);
                        src_chain_addr[sge_chain_count] =
                                        (void *) sgel_next;
                        dst_addr_phys = _base_get_chain_phys(ioc,
                                                smid, sge_chain_count);
                        WARN_ON(dst_addr_phys > U32_MAX);
                        sgel->Address =
                                cpu_to_le32(lower_32_bits(dst_addr_phys));
                        sgel = sgel_next;
                        sge_chain_count++;
                        break;
                case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
                        if (is_write) {
                                if (is_scsiio_req) {
                                        _base_clone_to_sys_mem(buff_ptr,
                                            sg_virt(sg_scmd),
                                            (le32_to_cpu(sgel->FlagsLength) &
                                            0x00ffffff));
                                        /*
                                         * FIXME: this relies on a zero
                                         * PCI mem_offset.
                                         */
                                        sgel->Address =
                                            cpu_to_le32((u32)buff_ptr_phys);
                                } else {
                                        _base_clone_to_sys_mem(buff_ptr,
                                            ioc->config_vaddr,
                                            (le32_to_cpu(sgel->FlagsLength) &
                                            0x00ffffff));
                                        sgel->Address =
                                            cpu_to_le32((u32)buff_ptr_phys);
                                }
                        }
                        buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
                            0x00ffffff);
                        buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
                            0x00ffffff);
                        if ((le32_to_cpu(sgel->FlagsLength) &
                            (MPI2_SGE_FLAGS_END_OF_BUFFER
                                        << MPI2_SGE_FLAGS_SHIFT)))
                                goto eob_clone_chain;
                        else {
                                /*
                                 * Every single element in MPT will have an
                                 * associated sg_next. Better to sanity-check
                                 * that sg_next is not NULL; it would be a
                                 * bug if it were.
                                 */
                                if (is_scsiio_req) {
                                        sg_scmd = sg_next(sg_scmd);
                                        if (sg_scmd)
                                                sgel++;
                                        else
                                                goto eob_clone_chain;
                                }
                        }
                        break;
                }
        }

eob_clone_chain:
        for (i = 0; i < sge_chain_count; i++) {
                if (is_scsiio_req)
                        _base_clone_to_sys_mem(dst_chain_addr[i],
                                src_chain_addr[i], ioc->request_sz);
        }
}
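
/*
 * Design note: for MPI Endpoint (mCPU) controllers the function above
 * double-buffers a request before submission - SGE payloads and chain
 * frames are copied into the BAR0-mapped system region and each SGE
 * Address is rewritten to the matching BAR0 physical offset, so the
 * firmware sees addresses inside the BAR0 window rather than arbitrary
 * host memory (the apparent motivation for the double buffering).
 */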

/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 for other case.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
        struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
        struct pci_dev *pdev;

        if (!ioc)
                return -1;

        pdev = ioc->pdev;
        if (!pdev)
                return -1;
        pci_stop_and_remove_bus_device_locked(pdev);
        return 0;
}

/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
        struct MPT3SAS_ADAPTER *ioc =
            container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
        unsigned long flags;
        u32 doorbell;
        int rc;
        struct task_struct *p;


        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
                        ioc->pci_error_recovery)
                goto rearm_timer;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

        doorbell = mpt3sas_base_get_iocstate(ioc, 0);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
                ioc_err(ioc, "SAS host is non-operational !!!!\n");

                /* It may be possible that EEH recovery can resolve some of
                 * the pci bus failure issues rather than removing the dead
                 * ioc function by considering the controller to be in a
                 * non-operational state. So here priority is given to EEH
                 * recovery. If it doesn't resolve this issue, the mpt3sas
                 * driver will consider this controller non-operational and
                 * remove the dead ioc function.
                 */
                if (ioc->non_operational_loop++ < 5) {
                        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
                            flags);
                        goto rearm_timer;
                }

                /*
                 * Call _scsih_flush_pending_cmds callback so that we flush all
                 * pending commands back to OS. This call is required to avoid
                 * deadlock at block layer. Dead IOC will fail to do diag reset,
                 * and this call is safe since dead ioc will never return any
                 * command back from HW.
                 */
                ioc->schedule_dead_ioc_flush_running_cmds(ioc);
                /*
                 * Set remove_host flag early since kernel thread will
                 * take some time to execute.
                 */
                ioc->remove_host = 1;
                /* Remove the Dead Host */
                p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
                    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
                if (IS_ERR(p))
                        ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
                                __func__);
                else
                        ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
                                __func__);
                return; /* don't rearm timer */
        }

        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
                u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
                    ioc->manu_pg11.CoreDumpTOSec :
                    MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

                timeout /= (FAULT_POLLING_INTERVAL/1000);

                if (ioc->ioc_coredump_loop == 0) {
                        mpt3sas_print_coredump_info(ioc,
                            doorbell & MPI2_DOORBELL_DATA_MASK);
                        /* do not accept any IOs and disable the interrupts */
                        spin_lock_irqsave(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        ioc->shost_recovery = 1;
                        spin_unlock_irqrestore(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        _base_mask_interrupts(ioc);
                        _base_clear_outstanding_commands(ioc);
                }

                ioc_info(ioc, "%s: CoreDump loop %d.",
                    __func__, ioc->ioc_coredump_loop);

                /* Wait until CoreDump completes or times out */
                if (ioc->ioc_coredump_loop++ < timeout) {
                        spin_lock_irqsave(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        goto rearm_timer;
                }
        }

        if (ioc->ioc_coredump_loop) {
                if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
                        ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
                            __func__, ioc->ioc_coredump_loop);
                else
                        ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
                            __func__, ioc->ioc_coredump_loop);
                ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
        }
        ioc->non_operational_loop = 0;
        if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
                rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
                ioc_warn(ioc, "%s: hard reset: %s\n",
                         __func__, rc == 0 ? "success" : "failed");
                doorbell = mpt3sas_base_get_iocstate(ioc, 0);
                if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
                        mpt3sas_print_fault_code(ioc, doorbell &
                            MPI2_DOORBELL_DATA_MASK);
                } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
                    MPI2_IOC_STATE_COREDUMP)
                        mpt3sas_print_coredump_info(ioc, doorbell &
                            MPI2_DOORBELL_DATA_MASK);
                if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
                    MPI2_IOC_STATE_OPERATIONAL)
                        return; /* don't rearm timer */
        }
        ioc->ioc_coredump_loop = 0;

        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
        if (ioc->fault_reset_work_q)
                queue_delayed_work(ioc->fault_reset_work_q,
                    &ioc->fault_reset_work,
                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
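
/*
 * Timing note, derived from the constants above: the watchdog rearms every
 * FAULT_POLLING_INTERVAL (1000 ms), so "timeout /= (FAULT_POLLING_INTERVAL
 * / 1000)" converts CoreDumpTOSec into a number of watchdog ticks; with a
 * 1000 ms interval the tick count equals the timeout in seconds.
 */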

/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long flags;

        if (ioc->fault_reset_work_q)
                return;

        /* initialize fault polling */

        INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
        snprintf(ioc->fault_reset_work_q_name,
            sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
            ioc->driver_name, ioc->id);
        ioc->fault_reset_work_q =
                create_singlethread_workqueue(ioc->fault_reset_work_q_name);
        if (!ioc->fault_reset_work_q) {
                ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
                return;
        }
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if (ioc->fault_reset_work_q)
                queue_delayed_work(ioc->fault_reset_work_q,
                    &ioc->fault_reset_work,
                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long flags;
        struct workqueue_struct *wq;

        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        wq = ioc->fault_reset_work_q;
        ioc->fault_reset_work_q = NULL;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
        if (wq) {
                if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
                        flush_workqueue(wq);
                destroy_workqueue(wq);
        }
}
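
/*
 * Design note: the workqueue pointer is cleared under the lock *before*
 * cancelling, so a concurrently running _base_fault_reset_work() observes
 * NULL at its rearm check and cannot requeue itself while the workqueue is
 * being destroyed.
 */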

/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
        ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return nothing.
 */
void
mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
        ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
 *      completes or times out
 * @ioc: per adapter object
 * @caller: caller function name
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
                const char *caller)
{
        u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
                        ioc->manu_pg11.CoreDumpTOSec :
                        MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

        int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
                                        timeout);

        if (ioc_state)
                ioc_err(ioc,
                    "%s: CoreDump timed out. (ioc_state=0x%x)\n",
                    caller, ioc_state);
        else
                ioc_info(ioc,
                    "%s: CoreDump completed. (ioc_state=0x%x)\n",
                    caller, ioc_state);

        return ioc_state;
}

/**
 * mpt3sas_halt_firmware - halts the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues. Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. With
 * both the driver and the firmware stopped, the end user can
 * obtain a ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
        u32 doorbell;

        if (!ioc->fwfault_debug)
                return;

        dump_stack();

        doorbell = ioc->base_readl(&ioc->chip->Doorbell);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
                mpt3sas_print_fault_code(ioc, doorbell &
                    MPI2_DOORBELL_DATA_MASK);
        } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
            MPI2_IOC_STATE_COREDUMP) {
                mpt3sas_print_coredump_info(ioc, doorbell &
                    MPI2_DOORBELL_DATA_MASK);
        } else {
                writel(0xC0FFEE00, &ioc->chip->Doorbell);
                ioc_err(ioc, "Firmware is halted due to command timeout\n");
        }

        if (ioc->fwfault_debug == 2)
                for (;;)
                        ;
        else
                panic("panic in %s\n", __func__);
}
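
/*
 * Note on the tail of mpt3sas_halt_firmware(): fwfault_debug == 2 spins
 * forever instead of panicking, keeping the host alive so the halted
 * firmware state can be inspected (e.g. over the controller UART); any
 * other non-zero fwfault_debug value panics the host.
 */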

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
        MPI2RequestHeader_t *request_hdr)
{
        u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        char *desc = NULL;
        u16 frame_sz;
        char *func_str = NULL;

        /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
        if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
            request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
            request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
                return;

        if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
                return;

        switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

        case MPI2_IOCSTATUS_INVALID_FUNCTION:
                desc = "invalid function";
                break;
        case MPI2_IOCSTATUS_BUSY:
                desc = "busy";
                break;
        case MPI2_IOCSTATUS_INVALID_SGL:
                desc = "invalid sgl";
                break;
        case MPI2_IOCSTATUS_INTERNAL_ERROR:
                desc = "internal error";
                break;
        case MPI2_IOCSTATUS_INVALID_VPID:
                desc = "invalid vpid";
                break;
        case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
                desc = "insufficient resources";
                break;
        case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
                desc = "insufficient power";
                break;
        case MPI2_IOCSTATUS_INVALID_FIELD:
                desc = "invalid field";
                break;
        case MPI2_IOCSTATUS_INVALID_STATE:
                desc = "invalid state";
                break;
        case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
                desc = "op state not supported";
                break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

        case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
                desc = "config invalid action";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
                desc = "config invalid type";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
                desc = "config invalid page";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
                desc = "config invalid data";
                break;
        case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
                desc = "config no defaults";
                break;
        case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
                desc = "config cant commit";
                break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

        case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
        case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
        case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
        case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
        case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
        case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
        case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
        case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
        case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
        case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
                break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

        case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
                desc = "eedp guard error";
                break;
        case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
                desc = "eedp ref tag error";
                break;
        case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
                desc = "eedp app tag error";
                break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

        case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
                desc = "target invalid io index";
                break;
        case MPI2_IOCSTATUS_TARGET_ABORTED:
                desc = "target aborted";
                break;
        case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
                desc = "target no conn retryable";
                break;
        case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
                desc = "target no connection";
                break;
        case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
                desc = "target xfer count mismatch";
                break;
        case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
                desc = "target data offset error";
                break;
        case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
                desc = "target too much write data";
                break;
        case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
                desc = "target iu too short";
                break;
        case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
                desc = "target ack nak timeout";
                break;
        case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
                desc = "target nak received";
                break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

        case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
                desc = "smp request failed";
                break;
        case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
                desc = "smp data overrun";
                break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

        case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
                desc = "diagnostic released";
                break;
        default:
                break;
        }

        if (!desc)
                return;

        switch (request_hdr->Function) {
        case MPI2_FUNCTION_CONFIG:
                frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
                func_str = "config_page";
                break;
        case MPI2_FUNCTION_SCSI_TASK_MGMT:
                frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
                func_str = "task_mgmt";
                break;
        case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
                frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
                func_str = "sas_iounit_ctl";
                break;
        case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
                frame_sz = sizeof(Mpi2SepRequest_t);
                func_str = "enclosure";
                break;
        case MPI2_FUNCTION_IOC_INIT:
                frame_sz = sizeof(Mpi2IOCInitRequest_t);
                func_str = "ioc_init";
                break;
        case MPI2_FUNCTION_PORT_ENABLE:
                frame_sz = sizeof(Mpi2PortEnableRequest_t);
                func_str = "port_enable";
                break;
        case MPI2_FUNCTION_SMP_PASSTHROUGH:
                frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
                func_str = "smp_passthru";
                break;
        case MPI2_FUNCTION_NVME_ENCAPSULATED:
                frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
                    ioc->sge_size;
                func_str = "nvme_encapsulated";
                break;
        default:
                frame_sz = 32;
                func_str = "unknown";
                break;
        }

        ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
                 desc, ioc_status, request_hdr, func_str);

        _debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
        Mpi2EventNotificationReply_t *mpi_reply)
{
        char *desc = NULL;
        u16 event;

        if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
                return;

        event = le16_to_cpu(mpi_reply->Event);

        switch (event) {
        case MPI2_EVENT_LOG_DATA:
                desc = "Log Data";
                break;
        case MPI2_EVENT_STATE_CHANGE:
                desc = "Status Change";
                break;
        case MPI2_EVENT_HARD_RESET_RECEIVED:
                desc = "Hard Reset Received";
                break;
        case MPI2_EVENT_EVENT_CHANGE:
                desc = "Event Change";
                break;
        case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
                desc = "Device Status Change";
                break;
        case MPI2_EVENT_IR_OPERATION_STATUS:
                if (!ioc->hide_ir_msg)
                        desc = "IR Operation Status";
                break;
        case MPI2_EVENT_SAS_DISCOVERY:
        {
                Mpi2EventDataSasDiscovery_t *event_data =
                    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
                ioc_info(ioc, "Discovery: (%s)",
                         event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
                         "start" : "stop");
                if (event_data->DiscoveryStatus)
                        pr_cont(" discovery_status(0x%08x)",
                            le32_to_cpu(event_data->DiscoveryStatus));
                pr_cont("\n");
                return;
        }
        case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
                desc = "SAS Broadcast Primitive";
                break;
        case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
                desc = "SAS Init Device Status Change";
                break;
        case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
                desc = "SAS Init Table Overflow";
                break;
        case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
                desc = "SAS Topology Change List";
                break;
        case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
                desc = "SAS Enclosure Device Status Change";
                break;
        case MPI2_EVENT_IR_VOLUME:
                if (!ioc->hide_ir_msg)
                        desc = "IR Volume";
                break;
        case MPI2_EVENT_IR_PHYSICAL_DISK:
                if (!ioc->hide_ir_msg)
                        desc = "IR Physical Disk";
                break;
        case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
                if (!ioc->hide_ir_msg)
                        desc = "IR Configuration Change List";
                break;
        case MPI2_EVENT_LOG_ENTRY_ADDED:
                if (!ioc->hide_ir_msg)
                        desc = "Log Entry Added";
                break;
        case MPI2_EVENT_TEMP_THRESHOLD:
                desc = "Temperature Threshold";
                break;
        case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
                desc = "Cable Event";
                break;
        case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
                desc = "SAS Device Discovery Error";
                break;
        case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
                desc = "PCIE Device Status Change";
                break;
        case MPI2_EVENT_PCIE_ENUMERATION:
        {
                Mpi26EventDataPCIeEnumeration_t *event_data =
                        (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
                ioc_info(ioc, "PCIE Enumeration: (%s)",
                         event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
                         "start" : "stop");
                if (event_data->EnumerationStatus)
                        pr_cont("enumeration_status(0x%08x)",
                                le32_to_cpu(event_data->EnumerationStatus));
                pr_cont("\n");
                return;
        }
        case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
                desc = "PCIE Topology Change List";
                break;
        }

        if (!desc)
                return;

        ioc_info(ioc, "%s\n", desc);
}
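
/*
 * Note: the verbose decode above is gated on MPT_DEBUG_EVENTS in
 * ioc->logging_level, so it is silent unless event debugging has been
 * enabled (the logging_level knob itself is defined outside this excerpt,
 * presumably as a module parameter or sysfs attribute).
 */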

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
        union loginfo_type {
                u32     loginfo;
                struct {
                        u32     subcode:16;
                        u32     code:8;
                        u32     originator:4;
                        u32     bus_type:4;
                } dw;
        };
        union loginfo_type sas_loginfo;
        char *originator_str = NULL;

        sas_loginfo.loginfo = log_info;
        if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
                return;

        /* each nexus loss loginfo */
        if (log_info == 0x31170000)
                return;

        /* eat the loginfos associated with task aborts */
        if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
            0x31140000 || log_info == 0x31130000))
                return;

        switch (sas_loginfo.dw.originator) {
        case 0:
                originator_str = "IOP";
                break;
        case 1:
                originator_str = "PL";
                break;
        case 2:
                if (!ioc->hide_ir_msg)
                        originator_str = "IR";
                else
                        originator_str = "WarpDrive";
                break;
        }

        ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
                 log_info,
                 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
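
/*
 * Worked example: per the bitfield union above, log_info 0x31170000
 * decodes as bus_type 3 (SAS), originator 1 (PL), code 0x17, subcode
 * 0x0000 - the per-device "nexus loss" loginfo that the early return
 * above deliberately suppresses.
 */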

/**
 * _base_display_reply_info - displays the ioc status and log info
 *      of a completed request
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        u32 reply)
{
        MPI2DefaultReply_t *mpi_reply;
        u16 ioc_status;
        u32 loginfo = 0;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (unlikely(!mpi_reply)) {
                ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
                        __FILE__, __LINE__, __func__);
                return;
        }
        ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

        if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
            (ioc->logging_level & MPT_DEBUG_REPLY)) {
                _base_sas_ioc_info(ioc, mpi_reply,
                    mpt3sas_base_get_msg_frame(ioc, smid));
        }

        if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
                loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
                _base_sas_log_info(ioc, loginfo);
        }

        if (ioc_status || loginfo) {
                ioc_status &= MPI2_IOCSTATUS_MASK;
                mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
        }
}

/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        u32 reply)
{
        MPI2DefaultReply_t *mpi_reply;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
                return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

        if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
                return 1;

        ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
        if (mpi_reply) {
                ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
                memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
        }
        ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

        complete(&ioc->base_cmds.done);
        return 1;
}

/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;
	struct _event_ack_list *delayed_event_ack;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;

	_base_display_event_data(ioc, mpi_reply);

	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
					GFP_ATOMIC);
		if (!delayed_event_ack)
			goto out;
		INIT_LIST_HEAD(&delayed_event_ack->list);
		delayed_event_ack->Event = mpi_reply->Event;
		delayed_event_ack->EventContext = mpi_reply->EventContext;
		list_add_tail(&delayed_event_ack->list,
				&ioc->delayed_event_ack_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
				    le16_to_cpu(mpi_reply->Event)));
		goto out;
	}

	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);

 out:

	/* scsih callback handler */
	mpt3sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt3sas_ctl_event_callback(ioc, msix_index, reply);

	return 1;
}

static struct scsiio_tracker *
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *cmd;

	if (WARN_ON(!smid) ||
	    WARN_ON(smid >= ioc->hi_priority_smid))
		return NULL;

	cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (cmd)
		return scsi_cmd_priv(cmd);

	return NULL;
}

/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: callback index.
 */
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	int i;
	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
	u8 cb_idx = 0xFF;

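	/* The SMID space is partitioned by range: SMIDs below ctl_smid map
	 * to mid-layer SCSI IO trackers, ctl_smid itself is the ctl (ioctl)
	 * slot, then come the hi-priority region and the internal command
	 * region up to hba_queue_depth; anything else keeps cb_idx == 0xFF.
	 */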
	if (smid < ioc->hi_priority_smid) {
		struct scsiio_tracker *st;

		if (smid < ctl_smid) {
			st = _get_st_from_smid(ioc, smid);
			if (st)
				cb_idx = st->cb_idx;
		} else if (smid == ctl_smid)
			cb_idx = ioc->ctl_cb_idx;
	} else if (smid < ioc->internal_smid) {
		i = smid - ioc->hi_priority_smid;
		cb_idx = ioc->hpr_lookup[i].cb_idx;
	} else if (smid <= ioc->hba_queue_depth) {
		i = smid - ioc->internal_smid;
		cb_idx = ioc->internal_lookup[i].cb_idx;
	}
	return cb_idx;
}

/**
 * _base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 */
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	ioc->mask_interrupts = 1;
	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
	writel(him_register, &ioc->chip->HostInterruptMask);
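	/* read the register back to flush the posted MMIO write */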
	ioc->base_readl(&ioc->chip->HostInterruptMask);
}

/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 */
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
	him_register &= ~MPI2_HIM_RIM;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->mask_interrupts = 0;
}

union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
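
/* A descriptor whose 64 bits are all ones (both .u.low and .u.high equal
 * UINT_MAX) marks an unused entry; the reply loop below relies on this
 * sentinel and rewrites each descriptor to ULLONG_MAX once it is consumed.
 */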

static u32 base_mod64(u64 dividend, u32 divisor)
{
	u32 remainder;

	if (!divisor)
		pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
	remainder = do_div(dividend, divisor);
	return remainder;
}

/**
 * _base_process_reply_queue - Process reply descriptors from reply
 *		descriptor post queue.
 * @reply_q: per IRQ's reply queue object.
 *
 * Return: number of reply descriptors processed from reply
 *		descriptor queue.
 */
static int
_base_process_reply_queue(struct adapter_reply_queue *reply_q)
{
	union reply_descriptor rd;
	u64 completed_cmds;
	u8 request_descript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	completed_cmds = 0;
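	/* Allow only one context to reap this queue at a time: the busy
	 * counter goes 0 -> 1 here and is dropped on every exit path; a
	 * concurrent caller sees busy == 1 and backs off immediately.
	 */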
	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return completed_cmds;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_descript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}

	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_descript_type ==
		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_descript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
		    request_descript_type ==
		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, 0);
				if (rc)
					mpt3sas_base_free_smid(ioc, smid);
			}
		} else if (request_descript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu(
			    rpf->AddressReply.ReplyFrameAddress);
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = _base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
				    (likely(mpt_callbacks[cb_idx] != NULL))) {
					rc = mpt_callbacks[cb_idx](ioc, smid,
					    msix_index, reply);
					if (reply)
						_base_display_reply_info(ioc,
						    smid, msix_index, reply);
					if (rc)
						mpt3sas_base_free_smid(ioc,
						    smid);
				}
			} else {
				_base_async_event(ioc, msix_index, reply);
			}

			/* reply free queue handling */
			if (reply) {
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				    (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				if (ioc->is_mcpu_endpoint)
					_base_clone_reply_to_sys_mem(ioc,
						reply,
						ioc->reply_free_host_index);
				writel(ioc->reply_free_host_index,
				    &ioc->chip->ReplyFreeHostIndex);
			}
		}

		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_descript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		/* Update the reply post host index after continuously
		 * processing the threshold number of Reply Descriptors.
		 * So that FW can find enough entries to post the Reply
		 * Descriptors in the reply descriptor post queue.
		 */
		if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
			if (ioc->combined_reply_queue) {
				writel(reply_q->reply_post_host_index |
					((msix_index & 7) <<
					 MPI2_RPHI_MSIX_INDEX_SHIFT),
				    ioc->replyPostRegisterIndex[msix_index/8]);
			} else {
				writel(reply_q->reply_post_host_index |
					(msix_index <<
					 MPI2_RPHI_MSIX_INDEX_SHIFT),
					&ioc->chip->ReplyPostHostIndex);
			}
			if (!reply_q->irq_poll_scheduled) {
				reply_q->irq_poll_scheduled = true;
				irq_poll_sched(&reply_q->irqpoll);
			}
			atomic_dec(&reply_q->busy);
			return completed_cmds;
		}
		if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);

 out:

	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}

	if (ioc->is_warpdrive) {
		writel(reply_q->reply_post_host_index,
		    ioc->reply_post_host_index[msix_index]);
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}

	/* Update Reply Post Host Index.
	 * For those HBA's which support combined reply queue feature
	 * 1. Get the correct Supplemental Reply Post Host Index Register.
	 *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
	 *    Index Register address bank i.e replyPostRegisterIndex[],
	 * 2. Then update this register with new reply host index value
	 *    in ReplyPostIndex field and the MSIxIndex field with
	 *    msix_index value reduced to a value between 0 and 7,
	 *    using a modulo 8 operation. Since each Supplemental Reply Post
	 *    Host Index Register supports 8 MSI-X vectors.
	 *
	 * For other HBA's just update the Reply Post Host Index register with
	 * new reply host index value in ReplyPostIndex Field and msix_index
	 * value in MSIxIndex field.
	 */
	if (ioc->combined_reply_queue)
		writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			ioc->replyPostRegisterIndex[msix_index/8]);
	else
		writel(reply_q->reply_post_host_index | (msix_index <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			&ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return completed_cmds;
}

/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to the adapter_reply_queue object
 *
 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	struct adapter_reply_queue *reply_q = bus_id;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;
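	/* The irq_poll softirq is already draining this queue; acknowledge
	 * the interrupt without touching the descriptors here.
	 */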
	if (reply_q->irq_poll_scheduled)
		return IRQ_HANDLED;
	return ((_base_process_reply_queue(reply_q) > 0) ?
			IRQ_HANDLED : IRQ_NONE);
}

/**
 * _base_irqpoll - IRQ poll callback handler
 * @irqpoll: irq_poll object
 * @budget: irq poll weight
 *
 * Return: number of reply descriptors processed
 */
static int
_base_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct adapter_reply_queue *reply_q;
	int num_entries = 0;

	reply_q = container_of(irqpoll, struct adapter_reply_queue,
			irqpoll);
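	/* Keep the hard IRQ handler quiet while polling: the line is
	 * disabled on entry and re-enabled only once the queue drains
	 * below budget.
	 */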
	if (reply_q->irq_line_enable) {
		disable_irq(reply_q->os_irq);
		reply_q->irq_line_enable = false;
	}
	num_entries = _base_process_reply_queue(reply_q);
	if (num_entries < budget) {
		irq_poll_complete(irqpoll);
		reply_q->irq_poll_scheduled = false;
		reply_q->irq_line_enable = true;
		enable_irq(reply_q->os_irq);
	}

	return num_entries;
}

/**
 * _base_init_irqpolls - initialize IRQ polls
 * @ioc: per adapter object
 */
static void
_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q, *next;

	if (list_empty(&ioc->reply_queue_list))
		return;

	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
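		/* Budget each poll pass at a quarter of the HBA queue
		 * depth so a single reply queue cannot monopolize the
		 * softirq context.
		 */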
		irq_poll_init(&reply_q->irqpoll,
			ioc->hba_queue_depth/4, _base_irqpoll);
		reply_q->irq_poll_scheduled = false;
		reply_q->irq_line_enable = true;
		reply_q->os_irq = pci_irq_vector(ioc->pdev,
		    reply_q->msix_index);
	}
}

/**
 * _base_is_controller_msix_enabled - does controller support multi-reply queues
 * @ioc: per adapter object
 *
 * Return: Whether or not MSI/X is enabled.
 */
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
{
	return (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}

/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 */
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q;

	/* If MSIX capability is turned off
	 * then multi-queues are not enabled
	 */
	if (!_base_is_controller_msix_enabled(ioc))
		return;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->shost_recovery || ioc->remove_host ||
				ioc->pci_error_recovery)
			return;
		/* TMs are on msix_index == 0 */
		if (reply_q->msix_index == 0)
			continue;
		if (reply_q->irq_poll_scheduled) {
			/* Calling irq_poll_disable will wait for any pending
			 * callbacks to have completed.
			 */
			irq_poll_disable(&reply_q->irqpoll);
			irq_poll_enable(&reply_q->irqpoll);
			reply_q->irq_poll_scheduled = false;
			reply_q->irq_line_enable = true;
			enable_irq(reply_q->os_irq);
			continue;
		}
		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
	}
}

/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 */
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
	mpt_callbacks[cb_idx] = NULL;
}

/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
 * @cb_func: callback function
 *
 * Return: Index of @cb_func.
 */
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
	u8 cb_idx;

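	/* Scan downward from the highest index for a free slot; slot 0 is
	 * only reached if every other entry is already taken.
	 */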
	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
		if (mpt_callbacks[cb_idx] == NULL)
			break;

	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
}

/**
 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
 */
void
mpt3sas_base_initialize_callback_handler(void)
{
	u8 cb_idx;

	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
		mpt3sas_base_release_callback_handler(cb_idx);
}

/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 */
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT);
	ioc->base_add_sg_single(paddr, flags_length, -1);
}

/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 */
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple32_t *sgel = paddr;

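	/* FlagsLength packs the SGE flags into the top byte (above
	 * MPI2_SGE_FLAGS_SHIFT) over the 24-bit transfer length.
	 */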
	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le32(dma_addr);
}

/**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 */
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple64_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le64(dma_addr);
}

/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @scmd: SCSI command of the IO request
 *
 * Return: chain tracker from chain_lookup table using key as
 * smid and smid's chain_offset.
 */
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
			       struct scsi_cmnd *scmd)
{
	struct chain_tracker *chain_req;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 smid = st->smid;
	u8 chain_offset =
	   atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);

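	/* chain_offset is a per-smid allocation cursor into the
	 * pre-allocated chains_per_smid[] array; returning NULL here means
	 * this IO already holds all the chain buffers it is allowed.
	 */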
	if (chain_offset == ioc->chains_needed_per_io)
		return NULL;

	chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
	atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
	return chain_req;
}

/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u32 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size;

		/* READ sgel last */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	}
}

/* IEEE format sgls */

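/* Worked example (illustrative, assuming a 4 KiB IOC page size): a 9 KiB
 * buffer at DMA address 0x10000200 needs three PRP entries. PRP1 holds
 * 0x10000200 and covers the 3584 bytes up to the page boundary; since more
 * than one page then remains, PRP2 becomes a PRP list pointer, and the list
 * carries two page-aligned entries, 0x10001000 and 0x10002000, for the
 * remaining 4096 + 1536 bytes.
 */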
/**
|
|
|
|
|
* _base_build_nvme_prp - This function is called for NVMe end devices to build
|
|
|
|
|
* a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
|
|
|
|
|
* entry of the NVMe message (PRP1). If the data buffer is small enough to be
|
|
|
|
|
* described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
|
|
|
|
|
* used to describe a larger data buffer. If the data buffer is too large to
|
|
|
|
|
* describe using the two PRP entriess inside the NVMe message, then PRP1
|
|
|
|
|
* describes the first data memory segment, and PRP2 contains a pointer to a PRP
|
|
|
|
|
* list located elsewhere in memory to describe the remaining data memory
|
|
|
|
|
* segments. The PRP list will be contiguous.
|
2018-06-16 05:42:01 +08:00
|
|
|
|
*
|
2017-10-31 20:32:29 +08:00
|
|
|
|
* The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
|
|
|
|
|
* consists of a list of PRP entries to describe a number of noncontigous
|
|
|
|
|
* physical memory segments as a single memory buffer, just as a SGL does. Note
|
|
|
|
|
* however, that this function is only used by the IOCTL call, so the memory
|
|
|
|
|
* given will be guaranteed to be contiguous. There is no need to translate
|
|
|
|
|
* non-contiguous SGL into a PRP in this case. All PRPs will describe
|
|
|
|
|
* contiguous space that is one page size each.
|
|
|
|
|
*
|
|
|
|
|
* Each NVMe message contains two PRP entries. The first (PRP1) either contains
|
|
|
|
|
* a PRP list pointer or a PRP element, depending upon the command. PRP2
|
|
|
|
|
* contains the second PRP element if the memory being described fits within 2
|
|
|
|
|
* PRP entries, or a PRP list pointer if the PRP spans more than two entries.
|
|
|
|
|
*
|
|
|
|
|
* A PRP list pointer contains the address of a PRP list, structured as a linear
|
|
|
|
|
* array of PRP entries. Each PRP entry in this list describes a segment of
|
|
|
|
|
* physical memory.
|
|
|
|
|
*
|
|
|
|
|
* Each 64-bit PRP entry comprises an address and an offset field. The address
|
|
|
|
|
* always points at the beginning of a 4KB physical memory page, and the offset
|
|
|
|
|
* describes where within that 4KB page the memory segment begins. Only the
|
|
|
|
|
* first element in a PRP list may contain a non-zero offest, implying that all
|
|
|
|
|
* memory segments following the first begin at the start of a 4KB page.
|
|
|
|
|
*
|
|
|
|
|
* Each PRP element normally describes 4KB of physical memory, with exceptions
|
|
|
|
|
* for the first and last elements in the list. If the memory being described
|
|
|
|
|
* by the list begins at a non-zero offset within the first 4KB page, then the
|
|
|
|
|
* first PRP element will contain a non-zero offset indicating where the region
|
|
|
|
|
* begins within the 4KB page. The last memory segment may end before the end
|
|
|
|
|
* of the 4KB segment, depending upon the overall size of the memory being
|
|
|
|
|
* described by the PRP list.
|
|
|
|
|
*
|
|
|
|
|
* Since PRP entries lack any indication of size, the overall data buffer length
|
|
|
|
|
* is used to determine where the end of the data memory buffer is located, and
|
|
|
|
|
* how many PRP entries are required to describe it.
|
|
|
|
|
*
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index for getting asscociated SGL
|
|
|
|
|
* @nvme_encap_request: the NVMe request msg frame pointer
|
|
|
|
|
* @data_out_dma: physical address for WRITES
|
|
|
|
|
* @data_out_sz: data xfer size for WRITES
|
|
|
|
|
* @data_in_dma: physical address for READS
|
|
|
|
|
* @data_in_sz: data xfer size for READS
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
|
|
|
|
|
Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
|
|
|
|
|
dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
|
|
|
|
|
size_t data_in_sz)
|
|
|
|
|
{
|
|
|
|
|
int prp_size = NVME_PRP_SIZE;
|
2017-11-06 21:35:16 +08:00
|
|
|
|
__le64 *prp_entry, *prp1_entry, *prp2_entry;
|
|
|
|
|
__le64 *prp_page;
|
|
|
|
|
dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
|
2017-10-31 20:32:29 +08:00
|
|
|
|
u32 offset, entry_len;
|
|
|
|
|
u32 page_mask_result, page_mask;
|
|
|
|
|
size_t length;
|
2018-06-16 05:41:57 +08:00
|
|
|
|
struct mpt3sas_nvme_cmd *nvme_cmd =
|
|
|
|
|
(void *)nvme_encap_request->NVMe_Command;
|
2017-10-31 20:32:29 +08:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Not all commands require a data transfer. If no data, just return
|
|
|
|
|
* without constructing any PRP.
|
|
|
|
|
*/
|
|
|
|
|
	if (!data_in_sz && !data_out_sz)
		return;

	prp1_entry = &nvme_cmd->prp1;
	prp2_entry = &nvme_cmd->prp2;
	prp_entry = prp1_entry;
	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.
	 */
	prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
	prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

	/*
	 * Check if we are within 1 entry of a page boundary; we don't
	 * want our first entry to be a PRP List entry.
	 */
	page_mask = ioc->page_size - 1;
	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
	if (!page_mask_result) {
		/* Bump up to next page boundary. */
		prp_page = (__le64 *)((u8 *)prp_page + prp_size);
		prp_page_dma = prp_page_dma + prp_size;
	}

	/*
	 * Set PRP physical pointer, which initially points to the current PRP
	 * DMA memory page.
	 */
	prp_entry_dma = prp_page_dma;

	/* Get physical address and length of the data buffer. */
	if (data_in_sz) {
		dma_addr = data_in_dma;
		length = data_in_sz;
	} else {
		dma_addr = data_out_dma;
		length = data_out_sz;
	}

	/* Loop while the length is not zero. */
	while (length) {
		/*
		 * Check if we need to put a list pointer here if we are at
		 * page boundary - prp_size (8 bytes).
		 */
		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
		if (!page_mask_result) {
			/*
			 * This is the last entry in a PRP List, so we need to
			 * put a PRP list pointer here.  What this does is:
			 *   - bump the current memory pointer to the next
			 *     address, which will be the next full page.
			 *   - set the PRP Entry to point to that page.  This
			 *     is now the PRP List pointer.
			 *   - bump the PRP Entry pointer to the start of the
			 *     next page.  Since all of this PRP memory is
			 *     contiguous, no need to get a new page - it's
			 *     just the next address.
			 */
			prp_entry_dma++;
			*prp_entry = cpu_to_le64(prp_entry_dma);
			prp_entry++;
		}

		/* Need to handle if entry will be part of a page. */
		offset = dma_addr & page_mask;
		entry_len = ioc->page_size - offset;

		if (prp_entry == prp1_entry) {
			/*
			 * Must fill in the first PRP pointer (PRP1) before
			 * moving on.
			 */
			*prp1_entry = cpu_to_le64(dma_addr);

			/*
			 * Now point to the second PRP entry within the
			 * command (PRP2).
			 */
			prp_entry = prp2_entry;
		} else if (prp_entry == prp2_entry) {
			/*
			 * Should the PRP2 entry be a PRP List pointer or just
			 * a regular PRP pointer?  If there is more than one
			 * more page of data, must use a PRP List pointer.
			 */
			if (length > ioc->page_size) {
				/*
				 * PRP2 will contain a PRP List pointer because
				 * more PRPs are needed with this command. The
				 * list will start at the beginning of the
				 * contiguous buffer.
				 */
				*prp2_entry = cpu_to_le64(prp_entry_dma);

				/*
				 * The next PRP Entry will be the start of the
				 * first PRP List.
				 */
				prp_entry = prp_page;
			} else {
				/*
				 * After this, the PRP Entries are complete.
				 * This command uses 2 PRPs and no PRP list.
				 */
				*prp2_entry = cpu_to_le64(dma_addr);
			}
		} else {
			/*
			 * Put entry in list and bump the addresses.
			 *
			 * After PRP1 and PRP2 are filled in, this will fill in
			 * all remaining PRP entries in a PRP List, one per
			 * each time through the loop.
			 */
			*prp_entry = cpu_to_le64(dma_addr);
			prp_entry++;
			prp_entry_dma++;
		}

		/*
		 * Bump the phys address of the command's data buffer by the
		 * entry_len.
		 */
		dma_addr += entry_len;

		/* Decrement length accounting for last partial page. */
		if (entry_len > length)
			length = 0;
		else
			length -= entry_len;
	}
}
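
/*
 * Worked example of the page-mask arithmetic above (illustrative only,
 * assuming ioc->page_size == 4096 and prp_size == 8, i.e. NVME_PRP_SIZE):
 *
 *	page_mask = 4096 - 1 = 0xFFF;
 *
 *	If prp_entry_dma == 0x10000FF8, then
 *	(prp_entry_dma + 8) & 0xFFF == 0x10001000 & 0xFFF == 0,
 *	so this slot is the last 8-byte entry on the page and must hold a
 *	PRP List pointer to the next (contiguous) page rather than a data
 *	pointer.
 *
 *	Likewise, for a data address dma_addr == 0x20000E00:
 *	offset    = 0x20000E00 & 0xFFF = 0xE00 (3584)
 *	entry_len = 4096 - 3584 = 512
 *	so that PRP entry covers only the 512 bytes up to the next page
 *	boundary.
 */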

/**
 * base_make_prp_nvme - prepare PRPs (Physical Region Page) SGLs
 *	specific to NVMe drives only
 * @ioc: per adapter object
 * @scmd: SCSI command from the mid-layer
 * @mpi_request: mpi request
 * @smid: msg Index
 * @sge_count: scatter gather element count.
 *
 * Return: nothing - the function is void; the native PRP "SGL" is built
 * in place in the request frame and the contiguous PCIe buffer.
 */
static void
base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd,
	Mpi25SCSIIORequest_t *mpi_request,
	u16 smid, int sge_count)
{
	int sge_len, num_prp_in_chain = 0;
	Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
	__le64 *curr_buff;
	dma_addr_t msg_dma, sge_addr, offset;
	u32 page_mask, page_mask_result;
	struct scatterlist *sg_scmd;
	u32 first_prp_len;
	int data_len = scsi_bufflen(scmd);
	u32 nvme_pg_size;

	nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
	/*
	 * NVMe has a very convoluted PRP format.  One PRP is required for
	 * each page or partial page.  The driver needs to split up OS
	 * sg_list entries if they are longer than one page or cross a page
	 * boundary.  The driver also has to insert a PRP list pointer entry
	 * as the last entry in each physical page of the PRP list.
	 *
	 * NOTE: The first PRP "entry" is actually placed in the first
	 * SGL entry in the main message as IEEE 64 format.  The 2nd
	 * entry in the main message is the chain element, and the rest
	 * of the PRP entries are built in the contiguous pcie buffer.
	 */
	page_mask = nvme_pg_size - 1;

	/*
	 * Native SGL is needed.
	 * Put a chain element in main message frame that points to the first
	 * chain buffer.
	 *
	 * NOTE:  The ChainOffset field must be 0 when using a chain pointer to
	 *        a native SGL.
	 */

	/* Set main message chain element pointer */
	main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
	/*
	 * For NVMe the chain element needs to be the 2nd SG entry in the main
	 * message.
	 */
	main_chain_element = (Mpi25IeeeSgeChain64_t *)
		((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));

	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.  Normal chain buffers can't be used
	 * because each chain buffer would need to be the size of an OS
	 * page (4k).
	 */
	curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
	msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

	main_chain_element->Address = cpu_to_le64(msg_dma);
	main_chain_element->NextChainOffset = 0;
	main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
			MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
			MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;

	/* Build the first PRP; the SGE need not be page aligned. */
	ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
	sg_scmd = scsi_sglist(scmd);
	sge_addr = sg_dma_address(sg_scmd);
	sge_len = sg_dma_len(sg_scmd);

	offset = sge_addr & page_mask;
	first_prp_len = nvme_pg_size - offset;

	ptr_first_sgl->Address = cpu_to_le64(sge_addr);
	ptr_first_sgl->Length = cpu_to_le32(first_prp_len);

	data_len -= first_prp_len;

	if (sge_len > first_prp_len) {
		sge_addr += first_prp_len;
		sge_len -= first_prp_len;
	} else if (data_len && (sge_len == first_prp_len)) {
		sg_scmd = sg_next(sg_scmd);
		sge_addr = sg_dma_address(sg_scmd);
		sge_len = sg_dma_len(sg_scmd);
	}

	for (;;) {
		offset = sge_addr & page_mask;

		/* Put PRP pointer due to page boundary */
		page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
		if (unlikely(!page_mask_result)) {
			scmd_printk(KERN_NOTICE,
				scmd, "page boundary curr_buff: 0x%p\n",
				curr_buff);
			msg_dma += 8;
			*curr_buff = cpu_to_le64(msg_dma);
			curr_buff++;
			num_prp_in_chain++;
		}

		*curr_buff = cpu_to_le64(sge_addr);
		curr_buff++;
		msg_dma += 8;
		num_prp_in_chain++;

		sge_addr += nvme_pg_size;
		sge_len -= nvme_pg_size;
		data_len -= nvme_pg_size;

		if (data_len <= 0)
			break;

		if (sge_len > 0)
			continue;

		sg_scmd = sg_next(sg_scmd);
		sge_addr = sg_dma_address(sg_scmd);
		sge_len = sg_dma_len(sg_scmd);
	}

	main_chain_element->Length =
		cpu_to_le32(num_prp_in_chain * sizeof(u64));
	return;
}
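
/*
 * Illustrative sketch of how base_make_prp_nvme() splits a transfer
 * (assumed numbers, not taken from real hardware): with
 * nvme_pg_size == 4096 and a single 8 KiB SG element starting at
 * sge_addr == 0x7F001A00:
 *
 *	offset        = 0x7F001A00 & 0xFFF = 0xA00 (2560)
 *	first_prp_len = 4096 - 2560 = 1536
 *
 * The first 1536 bytes go into the IEEE SGE in the main message
 * (ptr_first_sgl); the loop then emits one PRP per NVMe page for the
 * remaining 6656 bytes, i.e. two entries in the contiguous PCIe buffer,
 * and main_chain_element->Length ends up as 2 * sizeof(u64).
 */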

static bool
base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
{
	u32 data_length = 0;
	bool build_prp = true;

	data_length = scsi_bufflen(scmd);
	if (pcie_device &&
	    (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
		build_prp = false;
		return build_prp;
	}

	/* If the data length is <= 16K and the number of SGE entries is
	 * <= 2, build an IEEE SGL instead.
	 */
	if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
		build_prp = false;

	return build_prp;
}
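
/*
 * Decision sketch for base_is_prp_possible() (illustrative): with
 * NVME_PRP_PAGE_SIZE == 4096 the threshold is 16 KiB, so a 12 KiB
 * request mapped to 2 SG elements gets an IEEE SGL (build_prp ==
 * false), while a 64 KiB request, or any request with more than 2 SG
 * elements, is built as a native NVMe PRP (build_prp == true) -
 * unless the PCIe device is a PCIe SCSI device rather than NVMe, in
 * which case PRPs are never used.
 */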

/**
 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
 * determine if the driver needs to build a native SGL. If so, that native
 * SGL is built in the special contiguous buffers allocated especially for
 * PCIe SGL creation. If the driver will not build a native SGL, return
 * 1 and a normal IEEE SGL will be built. Currently this routine
 * supports NVMe.
 * @ioc: per adapter object
 * @mpi_request: mf request pointer
 * @smid: system request message index
 * @scmd: scsi command
 * @pcie_device: points to the PCIe device's info
 *
 * Return: 0 if native SGL was built, 1 if no SGL was built
 */
static int
_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
	Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
	struct _pcie_device *pcie_device)
{
	int sges_left;

	/* Get the SG list pointer and info. */
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
			"scsi_dma_map failed: request for %d bytes!\n",
			scsi_bufflen(scmd));
		return 1;
	}

	/* Check if we need to build a native SG list. */
	if (base_is_prp_possible(ioc, pcie_device,
				scmd, sges_left) == 0) {
		/* A native SG list is not needed; fall back to an IEEE SGL. */
		goto out;
	}

	/*
	 * Build native NVMe PRP.
	 */
	base_make_prp_nvme(ioc, scmd, mpi_request,
			smid, sges_left);

	return 0;
out:
	scsi_dma_unmap(scmd);
	return 1;
}
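
/*
 * Typical call flow (sketch): _base_build_sg_scmd_ieee() calls
 * _base_check_pcie_native_sgl() first for PCIe devices; a return of 0
 * means the request already carries a native NVMe PRP and no IEEE SGL
 * is needed, while a return of 1 (including the scsi_dma_map() failure
 * path) tells the caller to build a normal IEEE SGL instead.
 */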

/**
 * _base_add_sg_single_ieee - add sg element for IEEE format
 * @paddr: virtual address for SGE
 * @flags: SGE flags
 * @chain_offset: number of 128 byte elements from start of segment
 * @length: data transfer length
 * @dma_addr: Physical address
 */
static void
_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
	dma_addr_t dma_addr)
{
	Mpi25IeeeSgeChain64_t *sgel = paddr;

	sgel->Flags = flags;
	sgel->NextChainOffset = chain_offset;
	sgel->Length = cpu_to_le32(length);
	sgel->Address = cpu_to_le64(dma_addr);
}
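
/*
 * Minimal usage sketch (hypothetical values): filling one simple IEEE
 * SGE that maps a 512-byte buffer at DMA address 0x1000, marked as the
 * last element of the list:
 *
 *	u8 flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
 *	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
 *	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
 *
 *	_base_add_sg_single_ieee(psge, flags, 0, 512, 0x1000);
 *
 * The Length and Address fields are byte-swapped to little endian by
 * the helper, so callers pass CPU-order values.
 */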

/**
 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 */
static void
_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
		MPI25_IEEE_SGE_FLAGS_END_OF_LIST);

	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}

/**
 * _base_build_sg_scmd - main sg creation routine
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @unused: unused pcie_device pointer
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Return: 0 success, anything else error
 */
static int
_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
{
	Mpi2SCSIIORequest_t *mpi_request;
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_offset;
	u32 chain_length;
	u32 chain_flags;
	int sges_left;
	u32 sges_in_segment;
	u32 sgl_flags;
	u32 sgl_flags_last_element;
	u32 sgl_flags_end_buffer;
	struct chain_tracker *chain_req;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* init scatter gather flags */
	sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
	if (scmd->sc_data_direction == DMA_TO_DEVICE)
		sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
	sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
	    << MPI2_SGE_FLAGS_SHIFT;
	sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
	    << MPI2_SGE_FLAGS_SHIFT;
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;

	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
			"scsi_dma_map failed: request for %d bytes!\n",
			scsi_bufflen(scmd));
		return -ENOMEM;
	}

	sg_local = &mpi_request->SGL;
	sges_in_segment = ioc->max_sges_in_main_message;
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
	    (sges_in_segment * ioc->sge_size))/4;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment) {
		if (sges_in_segment == 1)
			ioc->base_add_sg_single(sg_local,
			    sgl_flags_last_element | sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			ioc->base_add_sg_single(sg_local, sgl_flags |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size;
		sges_left--;
		sges_in_segment--;
	}

	/* initializing the chain flags and pointers */
	chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
	if (!chain_req)
		return -1;
	chain = chain_req->chain_buffer;
	chain_dma = chain_req->chain_buffer_dma;
	do {
		sges_in_segment = (sges_left <=
		    ioc->max_sges_in_chain_message) ? sges_left :
		    ioc->max_sges_in_chain_message;
		chain_offset = (sges_left == sges_in_segment) ?
		    0 : (sges_in_segment * ioc->sge_size)/4;
		chain_length = sges_in_segment * ioc->sge_size;
		if (chain_offset) {
			chain_offset = chain_offset <<
			    MPI2_SGE_CHAIN_OFFSET_SHIFT;
			chain_length += ioc->sge_size;
		}
		ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
		    chain_length, chain_dma);
		sg_local = chain;
		if (!chain_offset)
			goto fill_in_last_segment;

		/* fill in chain segments */
		while (sges_in_segment) {
			if (sges_in_segment == 1)
				ioc->base_add_sg_single(sg_local,
				    sgl_flags_last_element |
				    sg_dma_len(sg_scmd),
				    sg_dma_address(sg_scmd));
			else
				ioc->base_add_sg_single(sg_local, sgl_flags |
				    sg_dma_len(sg_scmd),
				    sg_dma_address(sg_scmd));
			sg_scmd = sg_next(sg_scmd);
			sg_local += ioc->sge_size;
			sges_left--;
			sges_in_segment--;
		}

		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
		if (!chain_req)
			return -1;
		chain = chain_req->chain_buffer;
		chain_dma = chain_req->chain_buffer_dma;
	} while (1);


 fill_in_last_segment:

	/* fill the last segment */
	while (sges_left) {
		if (sges_left == 1)
			ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		else
			ioc->base_add_sg_single(sg_local, sgl_flags |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size;
		sges_left--;
	}

	return 0;
}
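
/*
 * ChainOffset arithmetic sketch for the MPI2 format (assumed sizes for
 * illustration only): with sge_size == sizeof(Mpi2SGESimple64_t) == 12
 * and, say, offsetof(Mpi2SCSIIORequest_t, SGL) == 96, a main segment
 * holding 2 SGEs before the chain element gives
 *
 *	ChainOffset = (96 + 2 * 12) / 4 = 30
 *
 * i.e. the chain element starts 30 dwords into the request frame,
 * which is how the IOC firmware expects the offset to be expressed.
 */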

/**
 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
 *		 built when required.
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Return: 0 success, anything else error
 */
static int
_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
{
	Mpi25SCSIIORequest_t *mpi_request;
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_offset;
	u32 chain_length;
	int sges_left;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 chain_sgl_flags;
	struct chain_tracker *chain_req;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* init scatter gather flags */
	simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
	chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;

	/* Check if we need to build a native SG list. */
	if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
	    smid, scmd, pcie_device) == 0)) {
		/* We built a native SG list, just return. */
		return 0;
	}

	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
			"scsi_dma_map failed: request for %d bytes!\n",
			scsi_bufflen(scmd));
		return -ENOMEM;
	}

	sg_local = &mpi_request->SGL;
	sges_in_segment = (ioc->request_sz -
	   offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
	    (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size_ieee;
		sges_left--;
		sges_in_segment--;
	}

	/* initializing the pointers */
	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
	if (!chain_req)
		return -1;
	chain = chain_req->chain_buffer;
	chain_dma = chain_req->chain_buffer_dma;
	do {
		sges_in_segment = (sges_left <=
		    ioc->max_sges_in_chain_message) ? sges_left :
		    ioc->max_sges_in_chain_message;
		chain_offset = (sges_left == sges_in_segment) ?
		    0 : sges_in_segment;
		chain_length = sges_in_segment * ioc->sge_size_ieee;
		if (chain_offset)
			chain_length += ioc->sge_size_ieee;
		_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
		    chain_offset, chain_length, chain_dma);

		sg_local = chain;
		if (!chain_offset)
			goto fill_in_last_segment;

		/* fill in chain segments */
		while (sges_in_segment) {
			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
			sg_scmd = sg_next(sg_scmd);
			sg_local += ioc->sge_size_ieee;
			sges_left--;
			sges_in_segment--;
		}

		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
		if (!chain_req)
			return -1;
		chain = chain_req->chain_buffer;
		chain_dma = chain_req->chain_buffer_dma;
	} while (1);


 fill_in_last_segment:

	/* fill the last segment */
	while (sges_left > 0) {
		if (sges_left == 1)
			_base_add_sg_single_ieee(sg_local,
			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size_ieee;
		sges_left--;
	}

	return 0;
}
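
/*
 * The IEEE format expresses ChainOffset in units of 16-byte IEEE SGEs
 * rather than dwords. Sketch with assumed sizes (sge_size_ieee ==
 * sizeof(Mpi25IeeeSgeChain64_t) == 16, request_sz == 128, and the SGL
 * at byte offset 96 of Mpi25SCSIIORequest_t):
 *
 *	sges_in_segment = (128 - 96) / 16 = 2
 *	mpi_request->ChainOffset = (2 - 1) + 96 / 16 = 7
 *
 * so the chain element would be the 8th 16-byte slot of the request
 * frame under those assumptions.
 */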

/**
 * _base_build_sg_ieee - build generic sg for IEEE format
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */
static void
_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u8 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge_ieee(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
		    data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size_ieee;

		/* READ sgel last */
		sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
		    data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
		    data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
		    data_in_dma);
	}
}

#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
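
/*
 * convert_to_kb() turns a page count into kilobytes: with the common
 * PAGE_SHIFT of 12 (4 KiB pages) it reduces to ((x) << 2), so
 * convert_to_kb(1024) == 4096 kB. It is used below to report totalram
 * from si_meminfo() in kB.
 */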

/**
 * _base_config_dma_addressing - set dma addressing
 * @ioc: per adapter object
 * @pdev: PCI device struct
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
	u64 required_mask, coherent_mask;
	struct sysinfo s;
	/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
	int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;

	if (ioc->is_mcpu_endpoint)
		goto try_32bit;

	required_mask = dma_get_required_mask(&pdev->dev);
	if (sizeof(dma_addr_t) == 4 || required_mask == 32)
		goto try_32bit;

	if (ioc->dma_mask)
		coherent_mask = DMA_BIT_MASK(dma_mask);
	else
		coherent_mask = DMA_BIT_MASK(32);

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
	    dma_set_coherent_mask(&pdev->dev, coherent_mask))
		goto try_32bit;

	ioc->base_add_sg_single = &_base_add_sg_single_64;
	ioc->sge_size = sizeof(Mpi2SGESimple64_t);
	ioc->dma_mask = dma_mask;
	goto out;

 try_32bit:
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	ioc->base_add_sg_single = &_base_add_sg_single_32;
	ioc->sge_size = sizeof(Mpi2SGESimple32_t);
	ioc->dma_mask = 32;
 out:
	si_meminfo(&s);
	ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
		 ioc->dma_mask, convert_to_kb(s.totalram));

	return 0;
}
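
/*
 * The function above is an instance of the usual PCI driver pattern:
 * try the widest DMA mask the device supports, then fall back to
 * 32-bit. A generic sketch of that pattern (illustrative, not driver
 * code):
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 *
 * mpt3sas additionally caps SAS3+ parts at 63 bits (see the dma_mask
 * initialization above) and keeps separate streaming/coherent masks.
 */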

static int
_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
				      struct pci_dev *pdev)
{
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			return -ENODEV;
	}
	return 0;
}

/**
 * _base_check_enable_msix - checks whether the controller is MSI-X capable.
 * @ioc: per adapter object
 *
 * Check to see if card is capable of MSIX, and set number
 * of available msix vectors
 */
static int
_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int base;
	u16 message_control;

	/* Check whether the controller is a SAS2008 B0;
	 * if so, use IO-APIC instead of MSI-X.
	 */
	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
	    ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
		return -EINVAL;
	}

	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
	if (!base) {
		dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
		return -EINVAL;
	}

	/* get msix vector count */
	/* NUMA_IO not supported for older controllers */
	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
		ioc->msix_vector_count = 1;
	else {
		pci_read_config_word(ioc->pdev, base + 2, &message_control);
		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
	}
	dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
				  ioc->msix_vector_count));
	return 0;
}
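
/*
 * Decode example for the MSI-X Message Control word read above
 * (illustrative value): the Table Size field is encoded as N-1, so
 * reading message_control == 0x005F gives
 *
 *	ioc->msix_vector_count = (0x005F & 0x3FF) + 1 = 96
 *
 * which matches the 96 reply queues SAS3 controllers support.
 */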

/**
 * _base_free_irq - free irq
 * @ioc: per adapter object
 *
 * Freeing respective reply_queue from the list.
 */
static void
_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q, *next;

	if (list_empty(&ioc->reply_queue_list))
		return;

	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
		list_del(&reply_q->list);
		if (ioc->smp_affinity_enable)
			irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
			    reply_q->msix_index), NULL);
		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
			 reply_q);
		kfree(reply_q);
	}
}

/**
 * _base_request_irq - request irq
 * @ioc: per adapter object
 * @index: msix index into vector table
 *
 * Inserting respective reply_queue into the list.
 */
static int
_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
{
	struct pci_dev *pdev = ioc->pdev;
	struct adapter_reply_queue *reply_q;
	int r;

	reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
	if (!reply_q) {
		ioc_err(ioc, "unable to allocate memory %zu!\n",
			sizeof(struct adapter_reply_queue));
		return -ENOMEM;
	}
	reply_q->ioc = ioc;
	reply_q->msix_index = index;

	atomic_set(&reply_q->busy, 0);
	if (ioc->msix_enable)
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
		    ioc->driver_name, ioc->id, index);
	else
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
		    ioc->driver_name, ioc->id);
	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
			IRQF_SHARED, reply_q->name, reply_q);
	if (r) {
		pr_err("%s: unable to allocate interrupt %d!\n",
		       reply_q->name, pci_irq_vector(pdev, index));
		kfree(reply_q);
		return -EBUSY;
	}

	INIT_LIST_HEAD(&reply_q->list);
	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
	return 0;
}

/**
 * _base_assign_reply_queues - assigning msix index for each cpu
 * @ioc: per adapter object
 *
 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
 *
 * It would be nice if we could call irq_set_affinity, however it is not
 * an exported symbol
 */
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned int cpu, nr_cpus, nr_msix, index = 0;
	struct adapter_reply_queue *reply_q;
	int local_numa_node;

	if (!_base_is_controller_msix_enabled(ioc))
		return;

	if (ioc->msix_load_balance)
		return;

	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);

	nr_cpus = num_online_cpus();
	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
					       ioc->facts.MaxMSIxVectors);
	if (!nr_msix)
		return;

	if (ioc->smp_affinity_enable) {

		/*
		 * set irq affinity to local numa node for those irqs
		 * corresponding to high iops queues.
		 */
		if (ioc->high_iops_queues) {
			local_numa_node = dev_to_node(&ioc->pdev->dev);
			for (index = 0; index < ioc->high_iops_queues;
			    index++) {
				irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
				    index), cpumask_of_node(local_numa_node));
			}
		}

		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
			const cpumask_t *mask;

			if (reply_q->msix_index < ioc->high_iops_queues)
				continue;

			mask = pci_irq_get_affinity(ioc->pdev,
			    reply_q->msix_index);
			if (!mask) {
				ioc_warn(ioc, "no affinity for msi %x\n",
					 reply_q->msix_index);
				goto fall_back;
			}

			for_each_cpu_and(cpu, mask, cpu_online_mask) {
				if (cpu >= ioc->cpu_msix_table_sz)
					break;
				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			}
		}
		return;
	}

 fall_back:
	cpu = cpumask_first(cpu_online_mask);
	nr_msix -= ioc->high_iops_queues;
	index = 0;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		unsigned int i, group = nr_cpus / nr_msix;

		if (reply_q->msix_index < ioc->high_iops_queues)
			continue;

		if (cpu >= nr_cpus)
			break;

		if (index < nr_cpus % nr_msix)
			group++;

		for (i = 0 ; i < group ; i++) {
			ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			cpu = cpumask_next(cpu, cpu_online_mask);
		}
		index++;
	}
}
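
/*
 * Fallback mapping sketch (hypothetical counts): with nr_cpus == 8
 * online CPUs, nr_msix == 3 reply queues and no high-iops queues, the
 * loop above sizes the groups as 8 / 3 == 2, with the first 8 % 3 == 2
 * queues each getting one extra CPU, i.e. groups of 3 + 3 + 2:
 *
 *	cpu_msix_table: CPU 0-2 -> msix 0, CPU 3-5 -> msix 1,
 *			CPU 6-7 -> msix 2
 *
 * so every online CPU has a reply queue even when there are fewer
 * MSI-X vectors than CPUs.
 */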

/**
 * _base_check_and_enable_high_iops_queues - enable high iops mode
 * @ioc: per adapter object
 * @hba_msix_vector_count: msix vectors supported by HBA
 *
 * Enable high iops queues only if
 *  - HBA is a SEA/AERO controller and
 *  - MSI-X vectors supported by the HBA is 128 and
 *  - total CPU count in the system >= 16 and
 *  - loaded driver with default max_msix_vectors module parameter and
 *  - system booted in non kdump mode
 *
 * Return: nothing.
 */
static void
_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
		int hba_msix_vector_count)
{
	u16 lnksta, speed;

	if (perf_mode == MPT_PERF_MODE_IOPS ||
	    perf_mode == MPT_PERF_MODE_LATENCY) {
		ioc->high_iops_queues = 0;
		return;
	}

	if (perf_mode == MPT_PERF_MODE_DEFAULT) {

		pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
		speed = lnksta & PCI_EXP_LNKSTA_CLS;

		if (speed < 0x4) {
			ioc->high_iops_queues = 0;
			return;
		}
	}

	if (!reset_devices && ioc->is_aero_ioc &&
	    hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
	    num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
	    max_msix_vectors == -1)
		ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
	else
		ioc->high_iops_queues = 0;
}
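
/*
 * The PCI_EXP_LNKSTA_CLS field read above encodes the negotiated link
 * speed: 1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s, 4 = 16 GT/s. The
 * "speed < 0x4" test therefore disables high-iops queues on anything
 * slower than a Gen4 link, where the extra queues would not pay off.
 */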

/**
 * _base_disable_msix - disables msix
 * @ioc: per adapter object
 *
 */
static void
_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	if (!ioc->msix_enable)
		return;
	pci_free_irq_vectors(ioc->pdev);
	ioc->msix_enable = 0;
}

/**
 * _base_alloc_irq_vectors - allocate msix vectors
 * @ioc: per adapter object
 *
 */
static int
_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
{
	int i, irq_flags = PCI_IRQ_MSIX;
	struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
	struct irq_affinity *descp = &desc;

	if (ioc->smp_affinity_enable)
		irq_flags |= PCI_IRQ_AFFINITY;
	else
		descp = NULL;

	ioc_info(ioc, "high_iops_queues: %d, reply_queue_count: %d\n",
		 ioc->high_iops_queues, ioc->reply_queue_count);

	i = pci_alloc_irq_vectors_affinity(ioc->pdev,
	    ioc->high_iops_queues,
	    ioc->reply_queue_count, irq_flags, descp);

	return i;
}

/**
 * _base_enable_msix - enables msix, or falls back to io_apic
 * @ioc: per adapter object
 *
 */
static int
_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int r;
	int i, local_max_msix_vectors;
	u8 try_msix = 0;

	ioc->msix_load_balance = false;

	if (msix_disable == -1 || msix_disable == 0)
		try_msix = 1;

	if (!try_msix)
		goto try_ioapic;

	if (_base_check_enable_msix(ioc) != 0)
		goto try_ioapic;

	ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
	pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
		ioc->cpu_count, max_msix_vectors);
	if (ioc->is_aero_ioc)
		_base_check_and_enable_high_iops_queues(ioc,
			ioc->msix_vector_count);
	ioc->reply_queue_count =
		min_t(int, ioc->cpu_count + ioc->high_iops_queues,
		ioc->msix_vector_count);

	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
		local_max_msix_vectors = (reset_devices) ? 1 : 8;
	else
		local_max_msix_vectors = max_msix_vectors;

	if (local_max_msix_vectors > 0)
		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
			ioc->reply_queue_count);
	else if (local_max_msix_vectors == 0)
		goto try_ioapic;

	/*
	 * Enable msix_load_balance only if combined reply queue mode is
	 * disabled on SAS3 & above generation HBA devices.
	 */
	if (!ioc->combined_reply_queue &&
	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		ioc_info(ioc,
		    "combined ReplyQueue is off, Enabling msix load balance\n");
		ioc->msix_load_balance = true;
	}

	/*
	 * smp affinity setting is not needed when msix load balance
	 * is enabled.
	 */
	if (ioc->msix_load_balance)
		ioc->smp_affinity_enable = 0;

	r = _base_alloc_irq_vectors(ioc);
	if (r < 0) {
		ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
		goto try_ioapic;
	}

	ioc->msix_enable = 1;
	ioc->reply_queue_count = r;
	for (i = 0; i < ioc->reply_queue_count; i++) {
		r = _base_request_irq(ioc, i);
		if (r) {
			_base_free_irq(ioc);
			_base_disable_msix(ioc);
			goto try_ioapic;
		}
	}

	ioc_info(ioc, "High IOPs queues : %s\n",
		 ioc->high_iops_queues ? "enabled" : "disabled");

	return 0;

	/* fall back to io_apic interrupt routing */
 try_ioapic:
	ioc->high_iops_queues = 0;
	ioc_info(ioc, "High IOPs queues : disabled\n");
	ioc->reply_queue_count = 1;
	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
	if (r < 0) {
		dfailprintk(ioc,
			    ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
				     r));
	} else
		r = _base_request_irq(ioc, 0);

	return r;
}

/**
 * mpt3sas_base_unmap_resources - free controller resources
 * @ioc: per adapter object
 */
static void
mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;

	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	_base_free_irq(ioc);
	_base_disable_msix(ioc);

	kfree(ioc->replyPostRegisterIndex);
	ioc->replyPostRegisterIndex = NULL;


	if (ioc->chip_phys) {
		iounmap(ioc->chip);
		ioc->chip_phys = 0;
	}

	if (pci_is_enabled(pdev)) {
		pci_release_selected_regions(ioc->pdev, ioc->bars);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
	}
}

static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);

/**
 * _base_check_for_fault_and_issue_reset - check if IOC is in fault state
 *	and if it is in fault state then issue diag reset.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;
	int rc = -EFAULT;

	dinitprintk(ioc, pr_info("%s\n", __func__));
	if (ioc->pci_error_recovery)
		return 0;
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = _base_diag_reset(ioc);
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
		rc = _base_diag_reset(ioc);
	}

	return rc;
}

/**
 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;
	u32 memap_sz;
	u32 pio_sz;
	int i, r = 0, rc;
	u64 pio_chip = 0;
	phys_addr_t chip_phys = 0;
	struct adapter_reply_queue *reply_q;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		ioc_warn(ioc, "pci_enable_device_mem: failed\n");
		ioc->bars = 0;
		return -ENODEV;
	}


	if (pci_request_selected_regions(pdev, ioc->bars,
	    ioc->driver_name)) {
		ioc_warn(ioc, "pci_request_selected_regions: failed\n");
		ioc->bars = 0;
		r = -ENODEV;
		goto out_fail;
	}

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);


	if (_base_config_dma_addressing(ioc, pdev) != 0) {
		ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
		r = -ENODEV;
		goto out_fail;
	}

	for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
	     (!memap_sz || !pio_sz); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			if (pio_sz)
				continue;
			pio_chip = (u64)pci_resource_start(pdev, i);
			pio_sz = pci_resource_len(pdev, i);
		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			if (memap_sz)
				continue;
			ioc->chip_phys = pci_resource_start(pdev, i);
			chip_phys = ioc->chip_phys;
			memap_sz = pci_resource_len(pdev, i);
			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
		}
	}

	if (ioc->chip == NULL) {
		ioc_err(ioc,
		    "unable to map adapter memory! or resource not found\n");
		r = -EINVAL;
		goto out_fail;
	}

	_base_mask_interrupts(ioc);

	r = _base_get_ioc_facts(ioc);
	if (r) {
		rc = _base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_get_ioc_facts(ioc)))
			goto out_fail;
	}

	if (!ioc->rdpq_array_enable_assigned) {
		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
		ioc->rdpq_array_enable_assigned = 1;
	}

	r = _base_enable_msix(ioc);
	if (r)
		goto out_fail;
if (!ioc->is_driver_loading)
|
|
|
|
|
_base_init_irqpolls(ioc);
|
2015-06-30 14:54:47 +08:00
|
|
|
|
/* Use the Combined reply queue feature only for SAS3 C0 & higher
|
|
|
|
|
* revision HBAs and also only when reply queue count is greater than 8
|
|
|
|
|
*/
|
2018-05-31 18:34:51 +08:00
|
|
|
|
if (ioc->combined_reply_queue) {
|
2015-06-30 14:54:47 +08:00
|
|
|
|
/* Determine the Supplemental Reply Post Host Index Registers
|
|
|
|
|
* Addresses. Supplemental Reply Post Host Index Registers
|
|
|
|
|
* start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and
|
|
|
|
|
* each register is offset
|
|
|
|
|
* MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes from the previous one.
|
|
|
|
|
*/
|
|
|
|
|
ioc->replyPostRegisterIndex = kcalloc(
|
2016-10-26 16:04:38 +08:00
|
|
|
|
ioc->combined_reply_index_count,
|
2015-06-30 14:54:47 +08:00
|
|
|
|
sizeof(resource_size_t *), GFP_KERNEL);
|
|
|
|
|
if (!ioc->replyPostRegisterIndex) {
|
2019-12-26 19:13:30 +08:00
|
|
|
|
ioc_err(ioc,
|
|
|
|
|
"allocation for replyPostRegisterIndex failed!\n");
|
2015-06-30 14:54:47 +08:00
|
|
|
|
r = -ENOMEM;
|
|
|
|
|
goto out_fail;
|
|
|
|
|
}
|
|
|
|
|
|
2016-10-26 16:04:38 +08:00
|
|
|
|
for (i = 0; i < ioc->combined_reply_index_count; i++) {
|
2015-06-30 14:54:47 +08:00
|
|
|
|
ioc->replyPostRegisterIndex[i] = (resource_size_t *)
|
2018-04-24 17:28:30 +08:00
|
|
|
|
((u8 __force *)&ioc->chip->Doorbell +
|
2015-06-30 14:54:47 +08:00
|
|
|
|
MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
|
|
|
|
|
(i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
|
|
|
|
|
}
|
2018-05-31 18:34:51 +08:00
|
|
|
|
}
|
2015-06-30 14:54:47 +08:00
|
|
|
|
|
2016-07-31 00:06:26 +08:00
|
|
|
|
if (ioc->is_warpdrive) {
|
|
|
|
|
ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
|
|
|
|
|
&ioc->chip->ReplyPostHostIndex;
|
|
|
|
|
|
|
|
|
|
for (i = 1; i < ioc->cpu_msix_table_sz; i++)
|
|
|
|
|
ioc->reply_post_host_index[i] =
|
|
|
|
|
(resource_size_t __iomem *)
|
|
|
|
|
((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
|
|
|
|
|
* 4)));
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
|
2018-09-17 23:01:11 +08:00
|
|
|
|
pr_info("%s: %s enabled: IRQ %d\n",
|
|
|
|
|
reply_q->name,
|
|
|
|
|
ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
|
|
|
|
|
pci_irq_vector(ioc->pdev, reply_q->msix_index));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
|
|
|
|
|
&chip_phys, ioc->chip, memap_sz);
|
|
|
|
|
ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
|
|
|
|
|
(unsigned long long)pio_chip, pio_sz);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
/* Save PCI configuration state for recovery from PCI AER/EEH errors */
|
|
|
|
|
pci_save_state(pdev);
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
out_fail:
|
2015-06-30 14:54:50 +08:00
|
|
|
|
mpt3sas_base_unmap_resources(ioc);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return r;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_get_msg_frame - obtain request mf pointer
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index (smid zero is invalid)
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: virt pointer to message frame.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
void *
|
|
|
|
|
mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
|
|
|
|
|
{
|
|
|
|
|
return (void *)(ioc->request + (smid * ioc->request_sz));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: virt pointer to sense buffer.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
void *
|
|
|
|
|
mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
|
|
|
|
|
{
|
|
|
|
|
return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: phys pointer to the low 32-bit address of the sense buffer.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
__le32
|
|
|
|
|
mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
|
|
|
|
|
{
|
|
|
|
|
return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
|
|
|
|
|
SCSI_SENSE_BUFFERSIZE));
|
|
|
|
|
}
|
|
|
|
|
|
2017-10-31 20:32:28 +08:00
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: virt pointer to a PCIe SGL.
|
2017-10-31 20:32:28 +08:00
|
|
|
|
*/
|
|
|
|
|
void *
|
|
|
|
|
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
|
|
|
|
|
{
|
2018-01-04 20:57:11 +08:00
|
|
|
|
return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
|
2017-10-31 20:32:28 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: phys pointer to the address of the PCIe buffer.
|
2017-10-31 20:32:28 +08:00
|
|
|
|
*/
|
2017-11-06 21:35:16 +08:00
|
|
|
|
dma_addr_t
|
2017-10-31 20:32:28 +08:00
|
|
|
|
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
|
|
|
|
|
{
|
2018-01-04 20:57:11 +08:00
|
|
|
|
return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
|
2017-10-31 20:32:28 +08:00
|
|
|
|
}
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @phys_addr: lower 32-bit physical addr of the reply
|
|
|
|
|
*
|
|
|
|
|
* Converts 32bit lower physical addr into a virt address.
|
|
|
|
|
*/
|
|
|
|
|
void *
|
|
|
|
|
mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
|
|
|
|
|
{
|
|
|
|
|
if (!phys_addr)
|
|
|
|
|
return NULL;
|
|
|
|
|
return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-31 20:14:37 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_get_msix_index - get the msix index
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @scmd: scsi_cmnd object
|
|
|
|
|
*
|
|
|
|
|
* returns the msix index of the general reply queues,
|
|
|
|
|
* i.e. reply queue on which IO request's reply
|
|
|
|
|
* should be posted by the HBA firmware.
|
|
|
|
|
*/
|
2016-01-28 14:37:06 +08:00
|
|
|
|
static inline u8
|
2019-05-31 20:14:37 +08:00
|
|
|
|
_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
|
|
|
|
|
struct scsi_cmnd *scmd)
|
2016-01-28 14:37:06 +08:00
|
|
|
|
{
|
2019-02-15 15:40:28 +08:00
|
|
|
|
/* Enables reply_queue load balancing */
|
|
|
|
|
if (ioc->msix_load_balance)
|
|
|
|
|
return ioc->reply_queue_count ?
|
|
|
|
|
base_mod64(atomic64_add_return(1,
|
|
|
|
|
&ioc->total_io_cnt), ioc->reply_queue_count) : 0;
|
|
|
|
|
|
2016-01-28 14:37:06 +08:00
|
|
|
|
return ioc->cpu_msix_table[raw_smp_processor_id()];
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-26 19:13:32 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_sdev_nr_inflight_request - get the number of inflight requests
|
|
|
|
|
* of a request queue.
|
|
|
|
|
* @q: request_queue object
|
|
|
|
|
*
|
|
|
|
|
* returns the number of inflight requests on a request queue.
|
|
|
|
|
*/
|
|
|
|
|
inline unsigned long
|
|
|
|
|
_base_sdev_nr_inflight_request(struct request_queue *q)
|
|
|
|
|
{
|
|
|
|
|
struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0];
|
|
|
|
|
|
|
|
|
|
return atomic_read(&hctx->nr_active);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2019-05-31 20:14:38 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_get_high_iops_msix_index - get the msix index of
|
|
|
|
|
* high iops queues
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @scmd: scsi_cmnd object
|
|
|
|
|
*
|
|
|
|
|
* Returns: msix index of high iops reply queues.
|
|
|
|
|
* i.e. high iops reply queue on which IO request's
|
|
|
|
|
* reply should be posted by the HBA firmware.
|
|
|
|
|
*/
|
|
|
|
|
static inline u8
|
|
|
|
|
_base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
|
|
|
|
|
struct scsi_cmnd *scmd)
|
|
|
|
|
{
|
|
|
|
|
/**
|
|
|
|
|
* Round robin the IO interrupts among the high iops
|
|
|
|
|
* reply queues in terms of batch count 16 when outstanding
|
|
|
|
|
* IOs on the target device are >= 8.
|
|
|
|
|
*/
|
2019-12-26 19:13:32 +08:00
|
|
|
|
if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
|
2019-05-31 20:14:38 +08:00
|
|
|
|
MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
|
|
|
|
|
return base_mod64((
|
|
|
|
|
atomic64_add_return(1, &ioc->high_iops_outstanding) /
|
|
|
|
|
MPT3SAS_HIGH_IOPS_BATCH_COUNT),
|
|
|
|
|
MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
|
|
|
|
|
|
|
|
|
|
return _base_get_msix_index(ioc, scmd);
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_get_smid - obtain a free smid from internal queue
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @cb_idx: callback index
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: smid (zero is invalid)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
u16
|
|
|
|
|
mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
|
|
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
struct request_tracker *request;
|
|
|
|
|
u16 smid;
|
|
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
|
|
|
|
|
if (list_empty(&ioc->internal_free_list)) {
|
|
|
|
|
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: smid not available\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
request = list_entry(ioc->internal_free_list.next,
|
|
|
|
|
struct request_tracker, tracker_list);
|
|
|
|
|
request->cb_idx = cb_idx;
|
|
|
|
|
smid = request->smid;
|
|
|
|
|
list_del(&request->tracker_list);
|
|
|
|
|
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
|
|
|
|
|
return smid;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @cb_idx: callback index
|
|
|
|
|
* @scmd: pointer to scsi command object
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: smid (zero is invalid)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
u16
|
|
|
|
|
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
|
|
|
|
|
struct scsi_cmnd *scmd)
|
|
|
|
|
{
|
2018-01-04 20:57:11 +08:00
|
|
|
|
struct scsiio_tracker *request = scsi_cmd_priv(scmd);
|
|
|
|
|
unsigned int tag = scmd->request->tag;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
u16 smid;
|
|
|
|
|
|
2018-01-04 20:57:11 +08:00
|
|
|
|
smid = tag + 1;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
request->cb_idx = cb_idx;
|
2018-01-04 20:57:11 +08:00
|
|
|
|
request->smid = smid;
|
2019-05-31 20:14:39 +08:00
|
|
|
|
request->scmd = scmd;
|
2018-01-04 20:57:11 +08:00
|
|
|
|
INIT_LIST_HEAD(&request->chain_list);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return smid;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @cb_idx: callback index
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: smid (zero is invalid)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
u16
|
|
|
|
|
mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
|
|
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
struct request_tracker *request;
|
|
|
|
|
u16 smid;
|
|
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
|
|
|
|
|
if (list_empty(&ioc->hpr_free_list)) {
|
|
|
|
|
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
request = list_entry(ioc->hpr_free_list.next,
|
|
|
|
|
struct request_tracker, tracker_list);
|
|
|
|
|
request->cb_idx = cb_idx;
|
|
|
|
|
smid = request->smid;
|
|
|
|
|
list_del(&request->tracker_list);
|
|
|
|
|
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
|
|
|
|
|
return smid;
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-04 20:57:03 +08:00
|
|
|
|
static void
|
|
|
|
|
_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
|
|
|
|
|
{
|
|
|
|
|
/*
|
|
|
|
|
* See _wait_for_commands_to_complete() call with regard to this code.
|
|
|
|
|
*/
|
|
|
|
|
if (ioc->shost_recovery && ioc->pending_io_count) {
|
2018-06-24 22:03:26 +08:00
|
|
|
|
ioc->pending_io_count = scsi_host_busy(ioc->shost);
|
2018-01-04 20:57:10 +08:00
|
|
|
|
if (ioc->pending_io_count == 0)
|
2018-01-04 20:57:03 +08:00
|
|
|
|
wake_up(&ioc->reset_wq);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-04 20:57:11 +08:00
|
|
|
|
void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
|
|
|
|
|
struct scsiio_tracker *st)
|
|
|
|
|
{
|
|
|
|
|
if (WARN_ON(st->smid == 0))
|
|
|
|
|
return;
|
|
|
|
|
st->cb_idx = 0xFF;
|
|
|
|
|
st->direct_io = 0;
|
2019-05-31 20:14:39 +08:00
|
|
|
|
st->scmd = NULL;
|
2018-04-24 17:28:32 +08:00
|
|
|
|
atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
|
scsi: mpt3sas: Fix calltrace observed while running IO & reset
The kernel BUG below was observed while running IOs with host reset
(issued from an application):
mpt3sas_cm0: diag reset: SUCCESS
------------[ cut here ]------------
WARNING: CPU: 12 PID: 4336 at drivers/scsi/mpt3sas/mpt3sas_base.c:3282 mpt3sas_base_clear_st+0x3d/0x40 [mpt3sas]
Modules linked in: macsec tcp_diag udp_diag inet_diag unix_diag af_packet_diag netlink_diag binfmt_misc fuse xt_CHECKSUM ipt_MASQUERADE nf_nat_masquerade_ipv4 tun devlink ip6t_rpfilter ipt_REJECT nf_reject_ipv4 ip6t_REJECT nf_reject_ipv6 xt_conntrack ip_set nfnetlink ebtable_nat ebtable_broute bridge stp llc ip6table_nat nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6 ip6table_mangle ip6table_security ip6table_raw iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat nf_conntrack iptable_mangle iptable_security iptable_raw ebtable_filter ebtables ip6table_filter ip6_tables iptable_filter sunrpc vfat fat sb_edac intel_powerclamp coretemp intel_rapl iosf_mbi kvm_intel kvm irqbypass crc32_pclmul ghash_clmulni_intel aesni_intel lrw gf128mul glue_helper ablk_helper cryptd iTCO_wdt iTCO_vendor_support
dcdbas pcspkr joydev ipmi_ssif ses enclosure sg ipmi_devintf acpi_pad ipmi_msghandler acpi_power_meter mei_me lpc_ich wmi mei shpchp ip_tables xfs libcrc32c sd_mod crc_t10dif crct10dif_generic ata_generic pata_acpi uas usb_storage mgag200 i2c_algo_bit drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm drm ata_piix mpt3sas libata crct10dif_pclmul crct10dif_common tg3 crc32c_intel i2c_core raid_class ptp scsi_transport_sas pps_core dm_mirror dm_region_hash dm_log dm_mod
CPU: 12 PID: 4336 Comm: python Kdump: loaded Tainted: G W ------------ 3.10.0-875.el7.brdc.x86_64 #1
Hardware name: Dell Inc. PowerEdge R820/0YWR73, BIOS 1.5.0 03/08/2013
Call Trace:
[<ffffffff9cf16583>] dump_stack+0x19/0x1b
[<ffffffff9c891698>] __warn+0xd8/0x100
[<ffffffff9c8917dd>] warn_slowpath_null+0x1d/0x20
[<ffffffffc04f3f4d>] mpt3sas_base_clear_st+0x3d/0x40 [mpt3sas]
[<ffffffffc05047d2>] _scsih_flush_running_cmds+0x92/0xe0 [mpt3sas]
[<ffffffffc05095db>] mpt3sas_scsih_reset_handler+0x43b/0xaf0 [mpt3sas]
[<ffffffff9c894829>] ? vprintk_default+0x29/0x40
[<ffffffff9cf10531>] ? printk+0x60/0x77
[<ffffffffc04f06c8>] ? _base_diag_reset+0x238/0x340 [mpt3sas]
[<ffffffffc04f794d>] mpt3sas_base_hard_reset_handler+0x1ad/0x420 [mpt3sas]
[<ffffffffc05132b9>] _ctl_ioctl_main.isra.12+0x11b9/0x1200 [mpt3sas]
[<ffffffffc068d585>] ? xfs_file_aio_write+0x155/0x1b0 [xfs]
[<ffffffff9ca1a4e3>] ? do_sync_write+0x93/0xe0
[<ffffffffc051337a>] _ctl_ioctl+0x1a/0x20 [mpt3sas]
[<ffffffff9ca2fe90>] do_vfs_ioctl+0x350/0x560
[<ffffffff9ca1dec1>] ? __sb_end_write+0x31/0x60
[<ffffffff9ca30141>] SyS_ioctl+0xa1/0xc0
[<ffffffff9cf28715>] ? system_call_after_swapgs+0xa2/0x146
[<ffffffff9cf287d5>] system_call_fastpath+0x1c/0x21
[<ffffffff9cf28721>] ? system_call_after_swapgs+0xae/0x146
---[ end trace 5dac5b98d89aaa3c ]---
------------[ cut here ]------------
kernel BUG at block/blk-core.c:1476!
invalid opcode: 0000 [#1] SMP
Modules linked in: macsec tcp_diag udp_diag inet_diag unix_diag af_packet_diag netlink_diag binfmt_misc fuse xt_CHECKSUM ipt_MASQUERADE nf_nat_masquerade_ipv4 tun devlink ip6t_rpfilter ipt_REJECT nf_reject_ipv4 ip6t_REJECT nf_reject_ipv6 xt_conntrack ip_set nfnetlink ebtable_nat ebtable_broute bridge stp llc ip6table_nat nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6 ip6table_mangle ip6table_security ip6table_raw iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat nf_conntrack iptable_mangle iptable_security iptable_raw ebtable_filter ebtables ip6table_filter ip6_tables iptable_filter sunrpc vfat fat sb_edac intel_powerclamp coretemp intel_rapl iosf_mbi kvm_intel kvm irqbypass crc32_pclmul ghash_clmulni_intel aesni_intel lrw gf128mul glue_helper ablk_helper cryptd iTCO_wdt iTCO_vendor_support
dcdbas pcspkr joydev ipmi_ssif ses enclosure sg ipmi_devintf acpi_pad ipmi_msghandler acpi_power_meter mei_me lpc_ich wmi mei shpchp ip_tables xfs libcrc32c sd_mod crc_t10dif crct10dif_generic ata_generic pata_acpi uas usb_storage mgag200 i2c_algo_bit drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm drm ata_piix mpt3sas libata crct10dif_pclmul crct10dif_common tg3 crc32c_intel i2c_core raid_class ptp scsi_transport_sas pps_core dm_mirror dm_region_hash dm_log dm_mod
CPU: 12 PID: 4336 Comm: python Kdump: loaded Tainted: G W ------------ 3.10.0-875.el7.brdc.x86_64 #1
Hardware name: Dell Inc. PowerEdge R820/0YWR73, BIOS 1.5.0 03/08/2013
task: ffff903fc96e0fd0 ti: ffff903fb1eec000 task.ti: ffff903fb1eec000
RIP: 0010:[<ffffffff9cb19ec0>] [<ffffffff9cb19ec0>] blk_requeue_request+0x90/0xa0
RSP: 0018:ffff903c6b783dc0 EFLAGS: 00010087
RAX: ffff903bb67026d0 RBX: ffff903b7d6a6140 RCX: dead000000000200
RDX: ffff903bb67026d0 RSI: ffff903bb6702580 RDI: ffff903bb67026d0
RBP: ffff903c6b783dd8 R08: ffff903bb67026d0 R09: ffffd97e80000000
R10: ffff903c658bac00 R11: 0000000000000000 R12: ffff903bb6702580
R13: ffff903fa9a292f0 R14: 0000000000000246 R15: 0000000000001057
FS: 00007f7026f5b740(0000) GS:ffff903c6b780000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f298877c004 CR3: 00000000caf36000 CR4: 00000000000607e0
Call Trace:
<IRQ>
[<ffffffff9cca68ff>] __scsi_queue_insert+0xbf/0x110
[<ffffffff9cca79ca>] scsi_io_completion+0x5da/0x6a0
[<ffffffff9cc9ca3c>] scsi_finish_command+0xdc/0x140
[<ffffffff9cca6aa2>] scsi_softirq_done+0x132/0x160
[<ffffffff9cb240c6>] blk_done_softirq+0x96/0xc0
[<ffffffff9c89a905>] __do_softirq+0xf5/0x280
[<ffffffff9cf2bd2c>] call_softirq+0x1c/0x30
[<ffffffff9c82d625>] do_softirq+0x65/0xa0
[<ffffffff9c89ac85>] irq_exit+0x105/0x110
[<ffffffff9cf2d0a8>] smp_apic_timer_interrupt+0x48/0x60
[<ffffffff9cf297f2>] apic_timer_interrupt+0x162/0x170
<EOI>
[<ffffffff9cca5f41>] ? scsi_done+0x21/0x60
[<ffffffff9cb5ac18>] ? delay_tsc+0x38/0x60
[<ffffffff9cb5ab5d>] __const_udelay+0x2d/0x30
[<ffffffffc04effde>] _base_handshake_req_reply_wait+0x8e/0x4a0 [mpt3sas]
[<ffffffffc04f0b13>] _base_get_ioc_facts+0x123/0x590 [mpt3sas]
[<ffffffffc04f06c8>] ? _base_diag_reset+0x238/0x340 [mpt3sas]
[<ffffffffc04f7993>] mpt3sas_base_hard_reset_handler+0x1f3/0x420 [mpt3sas]
[<ffffffffc05132b9>] _ctl_ioctl_main.isra.12+0x11b9/0x1200 [mpt3sas]
[<ffffffffc068d585>] ? xfs_file_aio_write+0x155/0x1b0 [xfs]
[<ffffffff9ca1a4e3>] ? do_sync_write+0x93/0xe0
[<ffffffffc051337a>] _ctl_ioctl+0x1a/0x20 [mpt3sas]
[<ffffffff9ca2fe90>] do_vfs_ioctl+0x350/0x560
[<ffffffff9ca1dec1>] ? __sb_end_write+0x31/0x60
[<ffffffff9ca30141>] SyS_ioctl+0xa1/0xc0
[<ffffffff9cf28715>] ? system_call_after_swapgs+0xa2/0x146
[<ffffffff9cf287d5>] system_call_fastpath+0x1c/0x21
[<ffffffff9cf28721>] ? system_call_after_swapgs+0xae/0x146
Code: 83 c3 10 4c 89 e2 4c 89 ee e8 8d 21 04 00 48 8b 03 48 85 c0 75 e5 41 f6 44 24 4a 10 74 ad 4c 89 e6 4c 89 ef e8 b2 42 00 00 eb a0 <0f> 0b 0f 1f 40 00 66 2e 0f 1f 84 00 00 00 00 00 66 66 66 66 90
RIP [<ffffffff9cb19ec0>] blk_requeue_request+0x90/0xa0
RSP <ffff903c6b783dc0>
As part of the host reset operation, the driver will flush out all IOs
outstanding at the driver level with the "DID_RESET" result. To find
which commands are outstanding at the driver level, the driver loops over
smids from one to the HBA queue depth and calls
mpt3sas_scsih_scsi_lookup_get() to get the scmd, as shown below:
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
But the mpt3sas_scsih_scsi_lookup_get() function returns some scsi cmnds
which are not outstanding at the driver level (possibly the request was
only constructed at the block layer, since QUEUE_FLAG_QUIESCED is not set;
even if the driver uses scsi_block_requests and scsi_unblock_requests, the
issue still persists, as those only block further IO from the scsi layer,
not from the block layer), and these commands are flushed with the
DID_RESET host byte, thus resulting in the above kernel BUG.
This issue was introduced by commit dbec4c9040ed ("scsi: mpt3sas: lockless
command submission").
To fix this issue, we have modified mpt3sas_scsih_scsi_lookup_get() to
check whether the smid equals zero (note: whenever a scsi cmnd is being
processed at the driver level, its smid will be non-zero; it always starts
from one) before it returns the scmd pointer to the caller. If the smid is
zero, this function returns a NULL scmd pointer, so the driver won't flush
out those scsi cmnds at the driver level with the DID_RESET host byte, and
this issue will not be observed.
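A minimal sketch of the described check (assuming a tag-based lookup via
scsi_host_find_tag() and the per-command scsiio_tracker private data; the
actual patch may differ in detail):

struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = scsi_host_find_tag(ioc->shost, smid - 1);

	if (scmd) {
		struct scsiio_tracker *st = scsi_cmd_priv(scmd);

		/*
		 * st->smid == 0 means the command is not outstanding at
		 * the driver level, so it must not be flushed with
		 * DID_RESET during host reset.
		 */
		if (!st || st->smid == 0)
			scmd = NULL;
	}
	return scmd;
}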
[mkp: amended with updated fix from Sreekanth]
Signed-off-by: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
Fixes: dbec4c9040ed ("scsi: mpt3sas: lockless command submission")
Cc: stable@vger.kernel.org # v4.16+
Reviewed-by: Tomas Henzl <thenzl@redhat.com>
Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2018-07-13 00:53:29 +08:00
|
|
|
|
st->smid = 0;
|
2018-01-04 20:57:11 +08:00
|
|
|
|
}
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_free_smid - put smid back on free_list
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
*/
|
|
|
|
|
void
|
|
|
|
|
mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
|
|
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
if (smid < ioc->hi_priority_smid) {
|
2018-01-04 20:57:11 +08:00
|
|
|
|
struct scsiio_tracker *st;
|
2019-03-04 20:26:35 +08:00
|
|
|
|
void *request;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2018-01-04 20:57:11 +08:00
|
|
|
|
st = _get_st_from_smid(ioc, smid);
|
|
|
|
|
if (!st) {
|
|
|
|
|
_base_recovery_check(ioc);
|
|
|
|
|
return;
|
|
|
|
|
}
|
2019-03-04 20:26:35 +08:00
|
|
|
|
|
|
|
|
|
/* Clear MPI request frame */
|
|
|
|
|
request = mpt3sas_base_get_msg_frame(ioc, smid);
|
|
|
|
|
memset(request, 0, ioc->request_sz);
|
|
|
|
|
|
2018-01-04 20:57:11 +08:00
|
|
|
|
mpt3sas_base_clear_st(ioc, st);
|
2018-01-04 20:57:03 +08:00
|
|
|
|
_base_recovery_check(ioc);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return;
|
2018-01-04 20:57:11 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
|
|
|
|
|
if (smid < ioc->internal_smid) {
|
2012-11-30 10:14:21 +08:00
|
|
|
|
/* hi-priority */
|
|
|
|
|
i = smid - ioc->hi_priority_smid;
|
|
|
|
|
ioc->hpr_lookup[i].cb_idx = 0xFF;
|
|
|
|
|
list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
|
|
|
|
|
} else if (smid <= ioc->hba_queue_depth) {
|
|
|
|
|
/* internal queue */
|
|
|
|
|
i = smid - ioc->internal_smid;
|
|
|
|
|
ioc->internal_lookup[i].cb_idx = 0xFF;
|
|
|
|
|
list_add(&ioc->internal_lookup[i].tracker_list,
|
|
|
|
|
&ioc->internal_free_list);
|
|
|
|
|
}
|
|
|
|
|
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
|
|
|
|
|
}
|
|
|
|
|
|
2018-02-07 18:51:49 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_mpi_ep_writeq - 32 bit write to MMIO
|
|
|
|
|
* @b: data payload
|
|
|
|
|
* @addr: address in MMIO space
|
|
|
|
|
* @writeq_lock: spin lock
|
|
|
|
|
*
|
|
|
|
|
* This is special handling for MPI EP to take care of a 32-bit
|
|
|
|
|
* environment where it's not guaranteed to send the entire word
|
|
|
|
|
* in one transfer.
|
|
|
|
|
*/
|
|
|
|
|
static inline void
|
|
|
|
|
_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
|
|
|
|
|
spinlock_t *writeq_lock)
|
|
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
|
|
spin_lock_irqsave(writeq_lock, flags);
|
2018-07-31 13:36:36 +08:00
|
|
|
|
__raw_writel((u32)(b), addr);
|
|
|
|
|
__raw_writel((u32)(b >> 32), (addr + 4));
|
2018-02-07 18:51:49 +08:00
|
|
|
|
spin_unlock_irqrestore(writeq_lock, flags);
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_writeq - 64 bit write to MMIO
|
|
|
|
|
* @b: data payload
|
|
|
|
|
* @addr: address in MMIO space
|
|
|
|
|
* @writeq_lock: spin lock
|
|
|
|
|
*
|
|
|
|
|
* Glue for handling an atomic 64 bit word to MMIO. This special handling takes
|
|
|
|
|
* care of a 32-bit environment where it's not guaranteed to send the entire word
|
|
|
|
|
* in one transfer.
|
|
|
|
|
*/
|
|
|
|
|
#if defined(writeq) && defined(CONFIG_64BIT)
|
|
|
|
|
static inline void
|
|
|
|
|
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
|
|
|
|
|
{
|
2018-12-16 20:08:21 +08:00
|
|
|
|
wmb();
|
2018-07-31 13:36:36 +08:00
|
|
|
|
__raw_writeq(b, addr);
|
2018-12-16 20:08:21 +08:00
|
|
|
|
barrier();
|
2012-11-30 10:14:21 +08:00
|
|
|
|
}
|
|
|
|
|
#else
|
|
|
|
|
static inline void
|
|
|
|
|
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
|
|
|
|
|
{
|
2018-02-07 18:51:49 +08:00
|
|
|
|
_base_mpi_ep_writeq(b, addr, writeq_lock);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2019-05-31 20:14:39 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_set_and_get_msix_index - get the msix index and assign it to the msix_io
|
|
|
|
|
* variable of the scsi tracker
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
*
|
|
|
|
|
* returns msix index.
|
|
|
|
|
*/
|
|
|
|
|
static u8
|
|
|
|
|
_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
|
|
|
|
|
{
|
|
|
|
|
struct scsiio_tracker *st = NULL;
|
|
|
|
|
|
|
|
|
|
if (smid < ioc->hi_priority_smid)
|
|
|
|
|
st = _get_st_from_smid(ioc, smid);
|
|
|
|
|
|
|
|
|
|
if (st == NULL)
|
|
|
|
|
return _base_get_msix_index(ioc, NULL);
|
|
|
|
|
|
|
|
|
|
st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
|
|
|
|
|
return st->msix_io;
|
|
|
|
|
}
|
|
|
|
|
|
2018-02-07 18:51:49 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
* @handle: device handle
|
|
|
|
|
*/
|
|
|
|
|
static void
|
2019-05-31 20:14:34 +08:00
|
|
|
|
_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
|
|
|
|
|
u16 smid, u16 handle)
|
2018-02-07 18:51:49 +08:00
|
|
|
|
{
|
|
|
|
|
Mpi2RequestDescriptorUnion_t descriptor;
|
|
|
|
|
u64 *request = (u64 *)&descriptor;
|
|
|
|
|
void *mpi_req_iomem;
|
|
|
|
|
__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
|
|
|
|
|
|
|
|
|
|
_clone_sg_entries(ioc, (void *) mfp, smid);
|
2018-04-24 17:28:30 +08:00
|
|
|
|
mpi_req_iomem = (void __force *)ioc->chip +
|
2018-02-07 18:51:49 +08:00
|
|
|
|
MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
|
|
|
|
|
_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
|
|
|
|
|
ioc->request_sz);
|
|
|
|
|
descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
|
2019-05-31 20:14:39 +08:00
|
|
|
|
descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
|
2018-02-07 18:51:49 +08:00
|
|
|
|
descriptor.SCSIIO.SMID = cpu_to_le16(smid);
|
|
|
|
|
descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
|
|
|
|
|
descriptor.SCSIIO.LMID = 0;
|
|
|
|
|
_base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
|
|
|
|
|
&ioc->scsi_lookup_lock);
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
/**
|
2016-10-26 16:04:40 +08:00
|
|
|
|
* _base_put_smid_scsi_io - send SCSI_IO request to firmware
|
2012-11-30 10:14:21 +08:00
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
* @handle: device handle
|
|
|
|
|
*/
|
2016-10-26 16:04:40 +08:00
|
|
|
|
static void
|
|
|
|
|
_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
{
|
|
|
|
|
Mpi2RequestDescriptorUnion_t descriptor;
|
|
|
|
|
u64 *request = (u64 *)&descriptor;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
|
2019-05-31 20:14:39 +08:00
|
|
|
|
descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
descriptor.SCSIIO.SMID = cpu_to_le16(smid);
|
|
|
|
|
descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
|
|
|
|
|
descriptor.SCSIIO.LMID = 0;
|
|
|
|
|
_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
|
|
|
|
|
&ioc->scsi_lookup_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
2019-05-31 20:14:34 +08:00
|
|
|
|
* _base_put_smid_fast_path - send fast path request to firmware
|
2012-11-30 10:14:21 +08:00
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
* @handle: device handle
|
|
|
|
|
*/
|
2019-05-31 20:14:34 +08:00
|
|
|
|
static void
|
|
|
|
|
_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
|
2012-11-30 10:14:21 +08:00
|
|
|
|
u16 handle)
|
|
|
|
|
{
|
|
|
|
|
Mpi2RequestDescriptorUnion_t descriptor;
|
|
|
|
|
u64 *request = (u64 *)&descriptor;
|
|
|
|
|
|
|
|
|
|
descriptor.SCSIIO.RequestFlags =
|
|
|
|
|
MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
|
2019-05-31 20:14:39 +08:00
|
|
|
|
descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
descriptor.SCSIIO.SMID = cpu_to_le16(smid);
|
|
|
|
|
descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
|
|
|
|
|
descriptor.SCSIIO.LMID = 0;
|
|
|
|
|
_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
|
|
|
|
|
&ioc->scsi_lookup_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
2019-05-31 20:14:34 +08:00
|
|
|
|
* _base_put_smid_hi_priority - send Task Management request to firmware
|
2012-11-30 10:14:21 +08:00
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
2016-01-28 14:37:06 +08:00
|
|
|
|
* @msix_task: msix_task will be the same as the msix of the IO in case of task abort, else 0.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
2019-05-31 20:14:34 +08:00
|
|
|
|
static void
|
|
|
|
|
_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
|
2016-01-28 14:37:06 +08:00
|
|
|
|
u16 msix_task)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
{
|
|
|
|
|
Mpi2RequestDescriptorUnion_t descriptor;
|
2018-02-07 18:51:49 +08:00
|
|
|
|
void *mpi_req_iomem;
|
|
|
|
|
u64 *request;
|
|
|
|
|
|
|
|
|
|
if (ioc->is_mcpu_endpoint) {
|
|
|
|
|
__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
|
|
|
|
|
|
|
|
|
|
/* TBD 256 is offset within sys register. */
|
2018-04-24 17:28:30 +08:00
|
|
|
|
mpi_req_iomem = (void __force *)ioc->chip
|
|
|
|
|
+ MPI_FRAME_START_OFFSET
|
2018-02-07 18:51:49 +08:00
|
|
|
|
+ (smid * ioc->request_sz);
|
|
|
|
|
_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
|
|
|
|
|
ioc->request_sz);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
request = (u64 *)&descriptor;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
descriptor.HighPriority.RequestFlags =
|
|
|
|
|
MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
|
2016-01-28 14:37:06 +08:00
|
|
|
|
descriptor.HighPriority.MSIxIndex = msix_task;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
descriptor.HighPriority.SMID = cpu_to_le16(smid);
|
|
|
|
|
descriptor.HighPriority.LMID = 0;
|
|
|
|
|
descriptor.HighPriority.Reserved1 = 0;
|
2018-02-07 18:51:49 +08:00
|
|
|
|
if (ioc->is_mcpu_endpoint)
|
|
|
|
|
_base_mpi_ep_writeq(*request,
|
|
|
|
|
&ioc->chip->RequestDescriptorPostLow,
|
|
|
|
|
&ioc->scsi_lookup_lock);
|
|
|
|
|
else
|
|
|
|
|
_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
|
|
|
|
|
&ioc->scsi_lookup_lock);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
}
|
|
|
|
|
|
2017-10-31 20:32:29 +08:00
|
|
|
|
/**
|
2018-02-14 18:16:37 +08:00
|
|
|
|
* mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
|
2017-10-31 20:32:29 +08:00
|
|
|
|
* firmware
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
*/
|
2018-02-14 18:16:37 +08:00
|
|
|
|
void
|
|
|
|
|
mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
|
2017-10-31 20:32:29 +08:00
|
|
|
|
{
|
|
|
|
|
Mpi2RequestDescriptorUnion_t descriptor;
|
|
|
|
|
u64 *request = (u64 *)&descriptor;
|
|
|
|
|
|
|
|
|
|
descriptor.Default.RequestFlags =
|
|
|
|
|
MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
|
2019-05-31 20:14:39 +08:00
|
|
|
|
descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
|
2017-10-31 20:32:29 +08:00
|
|
|
|
descriptor.Default.SMID = cpu_to_le16(smid);
|
|
|
|
|
descriptor.Default.LMID = 0;
|
|
|
|
|
descriptor.Default.DescriptorTypeDependent = 0;
|
|
|
|
|
_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
|
|
|
|
|
&ioc->scsi_lookup_lock);
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
/**
|
2019-05-31 20:14:34 +08:00
|
|
|
|
* _base_put_smid_default - Default, primarily used for config pages
|
2012-11-30 10:14:21 +08:00
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
*/
|
2019-05-31 20:14:34 +08:00
|
|
|
|
static void
|
|
|
|
|
_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
{
|
|
|
|
|
Mpi2RequestDescriptorUnion_t descriptor;
|
2018-02-07 18:51:49 +08:00
|
|
|
|
void *mpi_req_iomem;
|
|
|
|
|
u64 *request;
|
|
|
|
|
|
|
|
|
|
if (ioc->is_mcpu_endpoint) {
|
|
|
|
|
__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
|
|
|
|
|
|
|
|
|
|
_clone_sg_entries(ioc, (void *) mfp, smid);
|
|
|
|
|
/* TBD 256 is offset within sys register */
|
2018-04-24 17:28:30 +08:00
|
|
|
|
mpi_req_iomem = (void __force *)ioc->chip +
|
2018-02-07 18:51:49 +08:00
|
|
|
|
MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
|
|
|
|
|
_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
|
|
|
|
|
ioc->request_sz);
|
|
|
|
|
}
|
|
|
|
|
request = (u64 *)&descriptor;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
|
2019-05-31 20:14:39 +08:00
|
|
|
|
descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
descriptor.Default.SMID = cpu_to_le16(smid);
|
|
|
|
|
descriptor.Default.LMID = 0;
|
|
|
|
|
descriptor.Default.DescriptorTypeDependent = 0;
|
2018-02-07 18:51:49 +08:00
|
|
|
|
if (ioc->is_mcpu_endpoint)
|
|
|
|
|
_base_mpi_ep_writeq(*request,
|
|
|
|
|
&ioc->chip->RequestDescriptorPostLow,
|
|
|
|
|
&ioc->scsi_lookup_lock);
|
|
|
|
|
else
|
|
|
|
|
_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
|
|
|
|
|
&ioc->scsi_lookup_lock);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
}
|
|
|
|
|
|
2019-05-31 20:14:35 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
|
|
|
|
|
* Atomic Request Descriptor
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
* @handle: device handle, unused in this function, for function type match
|
|
|
|
|
*
|
|
|
|
|
* Return nothing.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
|
|
|
|
|
u16 handle)
|
|
|
|
|
{
|
|
|
|
|
Mpi26AtomicRequestDescriptor_t descriptor;
|
|
|
|
|
u32 *request = (u32 *)&descriptor;
|
|
|
|
|
|
|
|
|
|
descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
|
2019-05-31 20:14:39 +08:00
|
|
|
|
descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
|
2019-05-31 20:14:35 +08:00
|
|
|
|
descriptor.SMID = cpu_to_le16(smid);
|
|
|
|
|
|
|
|
|
|
writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* _base_put_smid_fast_path_atomic - send fast path request to firmware
|
|
|
|
|
* using Atomic Request Descriptor
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
* @handle: device handle, unused in this function, for function type match
|
|
|
|
|
* Return nothing
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
|
|
|
|
|
u16 handle)
|
|
|
|
|
{
|
|
|
|
|
Mpi26AtomicRequestDescriptor_t descriptor;
|
|
|
|
|
u32 *request = (u32 *)&descriptor;
|
|
|
|
|
|
|
|
|
|
descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
|
2019-05-31 20:14:39 +08:00
|
|
|
|
descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
|
2019-05-31 20:14:35 +08:00
|
|
|
|
descriptor.SMID = cpu_to_le16(smid);
|
|
|
|
|
|
|
|
|
|
writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* _base_put_smid_hi_priority_atomic - send Task Management request to
|
|
|
|
|
* firmware using Atomic Request Descriptor
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
* @msix_task: msix_task will be the same as the msix of the IO in case of task abort, else 0.
|
|
|
|
|
*
|
|
|
|
|
* Return nothing.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
|
|
|
|
|
u16 msix_task)
|
|
|
|
|
{
|
|
|
|
|
Mpi26AtomicRequestDescriptor_t descriptor;
|
|
|
|
|
u32 *request = (u32 *)&descriptor;
|
|
|
|
|
|
|
|
|
|
descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
|
|
|
|
|
descriptor.MSIxIndex = msix_task;
|
|
|
|
|
descriptor.SMID = cpu_to_le16(smid);
|
|
|
|
|
|
|
|
|
|
writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* _base_put_smid_default_atomic - Default, primarily used for config pages
|
|
|
|
|
* using Atomic Request Descriptor
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
*
|
|
|
|
|
* Return nothing.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
|
|
|
|
|
{
|
|
|
|
|
Mpi26AtomicRequestDescriptor_t descriptor;
|
|
|
|
|
u32 *request = (u32 *)&descriptor;
|
|
|
|
|
|
|
|
|
|
descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
|
2019-05-31 20:14:39 +08:00
|
|
|
|
descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
|
2019-05-31 20:14:35 +08:00
|
|
|
|
descriptor.SMID = cpu_to_le16(smid);
|
|
|
|
|
|
|
|
|
|
writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
|
|
|
|
|
}
|
|
|
|
|
|
2014-09-12 18:05:30 +08:00
|
|
|
|
/**
|
2015-11-11 20:00:32 +08:00
|
|
|
|
* _base_display_OEMs_branding - Display branding string
|
2014-09-12 18:05:30 +08:00
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
*/
|
|
|
|
|
static void
|
2015-11-11 20:00:32 +08:00
|
|
|
|
_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
|
2014-09-12 18:05:30 +08:00
|
|
|
|
{
|
|
|
|
|
if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
|
|
|
|
|
return;
|
|
|
|
|
|
2015-11-11 20:00:32 +08:00
|
|
|
|
switch (ioc->pdev->subsystem_vendor) {
|
|
|
|
|
case PCI_VENDOR_ID_INTEL:
|
|
|
|
|
switch (ioc->pdev->device) {
|
|
|
|
|
case MPI2_MFGPAGE_DEVID_SAS2008:
|
|
|
|
|
switch (ioc->pdev->subsystem_device) {
|
|
|
|
|
case MPT2SAS_INTEL_RMS2LL080_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_INTEL_RMS2LL080_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_INTEL_RMS2LL040_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_INTEL_RMS2LL040_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_INTEL_SSD910_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_INTEL_SSD910_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
2019-02-16 05:54:14 +08:00
|
|
|
|
break;
|
2015-11-11 20:00:32 +08:00
|
|
|
|
case MPI2_MFGPAGE_DEVID_SAS2308_2:
|
|
|
|
|
switch (ioc->pdev->subsystem_device) {
|
|
|
|
|
case MPT2SAS_INTEL_RS25GB008_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_INTEL_RS25GB008_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_INTEL_RMS25JB080_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_INTEL_RMS25JB080_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_INTEL_RMS25JB040_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_INTEL_RMS25JB040_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_INTEL_RMS25KB080_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_INTEL_RMS25KB080_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_INTEL_RMS25KB040_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_INTEL_RMS25KB040_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_INTEL_RMS25LB040_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_INTEL_RMS25LB040_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_INTEL_RMS25LB080_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_INTEL_RMS25LB080_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
2019-02-16 05:54:14 +08:00
|
|
|
|
break;
|
2015-11-11 20:00:32 +08:00
|
|
|
|
case MPI25_MFGPAGE_DEVID_SAS3008:
|
|
|
|
|
switch (ioc->pdev->subsystem_device) {
|
|
|
|
|
case MPT3SAS_INTEL_RMS3JC080_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT3SAS_INTEL_RMS3JC080_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case MPT3SAS_INTEL_RS3GC008_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT3SAS_INTEL_RS3GC008_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT3SAS_INTEL_RS3FC044_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT3SAS_INTEL_RS3FC044_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT3SAS_INTEL_RS3UC080_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT3SAS_INTEL_RS3UC080_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
2014-09-12 18:05:30 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2014-09-12 18:05:30 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
break;
|
2015-11-11 20:00:32 +08:00
|
|
|
|
case PCI_VENDOR_ID_DELL:
|
|
|
|
|
switch (ioc->pdev->device) {
|
|
|
|
|
case MPI2_MFGPAGE_DEVID_SAS2008:
|
|
|
|
|
switch (ioc->pdev->subsystem_device) {
|
|
|
|
|
case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_DELL_PERC_H200_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_DELL_PERC_H200_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_DELL_6GBPS_SAS_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_DELL_6GBPS_SAS_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case MPI25_MFGPAGE_DEVID_SAS3008:
|
|
|
|
|
switch (ioc->pdev->subsystem_device) {
|
|
|
|
|
case MPT3SAS_DELL_12G_HBA_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT3SAS_DELL_12G_HBA_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
2015-06-30 14:54:56 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2015-06-30 14:54:56 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
break;
|
2015-11-11 20:00:32 +08:00
|
|
|
|
case PCI_VENDOR_ID_CISCO:
|
|
|
|
|
switch (ioc->pdev->device) {
|
|
|
|
|
case MPI25_MFGPAGE_DEVID_SAS3008:
|
|
|
|
|
switch (ioc->pdev->subsystem_device) {
|
|
|
|
|
case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
2015-06-30 14:55:02 +08:00
|
|
|
|
break;
|
2015-11-11 20:00:32 +08:00
|
|
|
|
case MPI25_MFGPAGE_DEVID_SAS3108_1:
|
|
|
|
|
switch (ioc->pdev->subsystem_device) {
|
|
|
|
|
case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
2015-06-30 14:54:57 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2015-06-30 14:54:57 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
break;
|
2015-11-11 20:00:32 +08:00
|
|
|
|
case MPT2SAS_HP_3PAR_SSVID:
|
|
|
|
|
switch (ioc->pdev->device) {
|
|
|
|
|
case MPI2_MFGPAGE_DEVID_SAS2004:
|
|
|
|
|
switch (ioc->pdev->subsystem_device) {
|
|
|
|
|
case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
2019-02-16 05:54:14 +08:00
|
|
|
|
break;
|
2015-11-11 20:00:32 +08:00
|
|
|
|
case MPI2_MFGPAGE_DEVID_SAS2308_2:
|
|
|
|
|
switch (ioc->pdev->subsystem_device) {
|
|
|
|
|
case MPT2SAS_HP_2_4_INTERNAL_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_HP_2_4_INTERNAL_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s\n",
|
|
|
|
|
MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2015-11-11 20:00:32 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
2019-02-16 05:54:14 +08:00
|
|
|
|
break;
|
2015-06-30 14:55:02 +08:00
|
|
|
|
default:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
|
|
|
|
|
ioc->pdev->subsystem_device);
|
2015-06-30 14:55:02 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
2015-06-30 14:54:57 +08:00
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-06-30 14:54:56 +08:00
|
|
|
|
|
2018-04-24 17:28:39 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
|
|
|
|
|
* version from FW Image Header.
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2018-04-24 17:28:39 +08:00
|
|
|
|
*/
|
|
|
|
|
static int
|
|
|
|
|
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
|
|
|
|
|
{
|
2019-09-13 21:04:47 +08:00
|
|
|
|
Mpi2FWImageHeader_t *fw_img_hdr;
|
|
|
|
|
Mpi26ComponentImageHeader_t *cmp_img_hdr;
|
2018-04-24 17:28:39 +08:00
|
|
|
|
Mpi25FWUploadRequest_t *mpi_request;
|
|
|
|
|
Mpi2FWUploadReply_t mpi_reply;
|
|
|
|
|
int r = 0;
|
2019-09-13 21:04:47 +08:00
|
|
|
|
u32 package_version = 0;
|
2018-04-24 17:28:39 +08:00
|
|
|
|
void *fwpkg_data = NULL;
|
|
|
|
|
dma_addr_t fwpkg_data_dma;
|
|
|
|
|
u16 smid, ioc_status;
|
|
|
|
|
size_t data_length;
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
|
2018-04-24 17:28:39 +08:00
|
|
|
|
|
|
|
|
|
if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: internal command already in use\n", __func__);
|
2018-04-24 17:28:39 +08:00
|
|
|
|
return -EAGAIN;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
data_length = sizeof(Mpi2FWImageHeader_t);
|
2018-10-11 15:35:25 +08:00
|
|
|
|
fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
|
|
|
|
|
&fwpkg_data_dma, GFP_KERNEL);
|
2018-04-24 17:28:39 +08:00
|
|
|
|
if (!fwpkg_data) {
|
2019-12-26 19:13:30 +08:00
|
|
|
|
ioc_err(ioc,
|
|
|
|
|
"Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
|
2018-09-17 23:01:09 +08:00
|
|
|
|
__FILE__, __LINE__, __func__);
|
2018-04-24 17:28:39 +08:00
|
|
|
|
return -ENOMEM;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
|
|
|
|
|
if (!smid) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
|
2018-04-24 17:28:39 +08:00
|
|
|
|
r = -EAGAIN;
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ioc->base_cmds.status = MPT3_CMD_PENDING;
|
|
|
|
|
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
|
|
|
|
|
ioc->base_cmds.smid = smid;
|
|
|
|
|
memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
|
|
|
|
|
mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
|
|
|
|
|
mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
|
|
|
|
|
mpi_request->ImageSize = cpu_to_le32(data_length);
|
|
|
|
|
ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
|
|
|
|
|
data_length);
|
|
|
|
|
init_completion(&ioc->base_cmds.done);
|
2019-05-31 20:14:34 +08:00
|
|
|
|
ioc->put_smid_default(ioc, smid);
|
2018-04-24 17:28:39 +08:00
|
|
|
|
/* Wait for 15 seconds */
|
|
|
|
|
wait_for_completion_timeout(&ioc->base_cmds.done,
|
|
|
|
|
FW_IMG_HDR_READ_TIMEOUT*HZ);
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s: complete\n", __func__);
|
2018-04-24 17:28:39 +08:00
|
|
|
|
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: timeout\n", __func__);
|
2018-04-24 17:28:39 +08:00
|
|
|
|
_debug_dump_mf(mpi_request,
|
|
|
|
|
sizeof(Mpi25FWUploadRequest_t)/4);
|
|
|
|
|
r = -ETIME;
|
|
|
|
|
} else {
|
|
|
|
|
memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
|
|
|
|
|
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
|
|
|
|
|
memcpy(&mpi_reply, ioc->base_cmds.reply,
|
|
|
|
|
sizeof(Mpi2FWUploadReply_t));
|
|
|
|
|
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
|
|
|
|
|
MPI2_IOCSTATUS_MASK;
|
|
|
|
|
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
|
2019-09-13 21:04:47 +08:00
|
|
|
|
fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
|
|
|
|
|
if (le32_to_cpu(fw_img_hdr->Signature) ==
|
|
|
|
|
MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
|
|
|
|
|
cmp_img_hdr =
|
|
|
|
|
(Mpi26ComponentImageHeader_t *)
|
|
|
|
|
(fwpkg_data);
|
|
|
|
|
package_version =
|
|
|
|
|
le32_to_cpu(
|
|
|
|
|
cmp_img_hdr->ApplicationSpecific);
|
|
|
|
|
} else
|
|
|
|
|
package_version =
|
|
|
|
|
le32_to_cpu(
|
|
|
|
|
fw_img_hdr->PackageVersion.Word);
|
|
|
|
|
if (package_version)
|
|
|
|
|
ioc_info(ioc,
|
|
|
|
|
"FW Package Ver(%02d.%02d.%02d.%02d)\n",
|
|
|
|
|
((package_version) & 0xFF000000) >> 24,
|
|
|
|
|
((package_version) & 0x00FF0000) >> 16,
|
|
|
|
|
((package_version) & 0x0000FF00) >> 8,
|
|
|
|
|
(package_version) & 0x000000FF);
|
2018-04-24 17:28:39 +08:00
|
|
|
|
} else {
|
|
|
|
|
_debug_dump_mf(&mpi_reply,
|
|
|
|
|
sizeof(Mpi2FWUploadReply_t)/4);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
|
|
|
|
|
out:
|
|
|
|
|
if (fwpkg_data)
|
2018-10-11 15:35:25 +08:00
|
|
|
|
dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
|
2018-04-24 17:28:39 +08:00
|
|
|
|
fwpkg_data_dma);
|
|
|
|
|
return r;
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_display_ioc_capabilities - Display IOC's capabilities.
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
|
|
|
|
|
{
|
|
|
|
|
int i = 0;
|
|
|
|
|
char desc[16];
|
|
|
|
|
u32 iounit_pg1_flags;
|
|
|
|
|
u32 bios_version;
|
|
|
|
|
|
|
|
|
|
bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
|
|
|
|
|
strncpy(desc, ioc->manu_pg0.ChipName, 16);
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
|
|
|
|
|
desc,
|
|
|
|
|
(ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
|
|
|
|
|
(ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
|
|
|
|
|
(ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
|
|
|
|
|
ioc->facts.FWVersion.Word & 0x000000FF,
|
|
|
|
|
ioc->pdev->revision,
|
|
|
|
|
(bios_version & 0xFF000000) >> 24,
|
|
|
|
|
(bios_version & 0x00FF0000) >> 16,
|
|
|
|
|
(bios_version & 0x0000FF00) >> 8,
|
|
|
|
|
bios_version & 0x000000FF);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2015-11-11 20:00:32 +08:00
|
|
|
|
_base_display_OEMs_branding(ioc);
|
2014-09-12 18:05:30 +08:00
|
|
|
|
|
2017-10-31 20:32:28 +08:00
|
|
|
|
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
|
|
|
|
|
pr_info("%sNVMe", i ? "," : "");
|
|
|
|
|
i++;
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "Protocol=(");

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
		pr_cont("Initiator");
		i++;
	}

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
		pr_cont("%sTarget", i ? "," : "");
		i++;
	}

	i = 0;
	pr_cont("), Capabilities=(");

	if (!ioc->hide_ir_msg) {
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
			pr_cont("Raid");
			i++;
		}
	}

	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
		pr_cont("%sTLR", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
		pr_cont("%sMulticast", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
		pr_cont("%sBIDI Target", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
		pr_cont("%sEEDP", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
		pr_cont("%sSnapshot Buffer", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
		pr_cont("%sDiag Trace Buffer", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
		pr_cont("%sDiag Extended Buffer", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
		pr_cont("%sTask Set Full", i ? "," : "");
		i++;
	}

	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
		pr_cont("%sNCQ", i ? "," : "");
		i++;
	}

	pr_cont(")\n");
}

/**
 * mpt3sas_base_update_missing_delay - change the missing delay timers
 * @ioc: per adapter object
 * @device_missing_delay: amount of time till device is reported missing
 * @io_missing_delay: interval IO is returned when there is a missing device
 *
 * The delays are passed in on the command line; this function modifies the
 * device missing delay as well as the io missing delay. It should be called
 * at driver load time.
 */
void
mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
	u16 device_missing_delay, u8 io_missing_delay)
{
	u16 dmd, dmd_new, dmd_original;
	u8 io_missing_delay_original;
	u16 sz;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2ConfigReply_t mpi_reply;
	u8 num_phys = 0;
	u16 ioc_status;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys)
		return;

	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
	    sizeof(Mpi2SasIOUnit1PhyData_t));
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/* device missing delay */
	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
	dmd_original = dmd;
	if (device_missing_delay > 0x7F) {
		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
		    device_missing_delay;
		dmd = dmd / 16;
		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
	} else
		dmd = device_missing_delay;
	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
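	/*
	 * Illustrative encoding (hypothetical request): asking for a
	 * 300 second delay exceeds the 7-bit field (0x7F == 127 s), so it
	 * is stored in 16 second units: 300 / 16 = 18 with the UNIT_16
	 * flag set, for an effective delay of 18 * 16 = 288 seconds.
	 */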

	/* io missing delay */
	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;

	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
	    sz)) {
		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
			dmd_new = (dmd &
			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
		else
			dmd_new =
			    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
		ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
			 dmd_original, dmd_new);
		ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
			 io_missing_delay_original,
			 io_missing_delay);
		ioc->device_missing_delay = dmd_new;
		ioc->io_missing_delay = io_missing_delay;
	}

out:
	kfree(sas_iounit_pg1);
}

/**
 * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
 *	according to performance mode.
 * @ioc: per adapter object
 *
 * Return nothing.
 */
static void
_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCPage1_t ioc_pg1;
	Mpi2ConfigReply_t mpi_reply;

	mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
	memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));

	switch (perf_mode) {
	case MPT_PERF_MODE_DEFAULT:
	case MPT_PERF_MODE_BALANCED:
		if (ioc->high_iops_queues) {
			ioc_info(ioc,
			    "Enable interrupt coalescing only for first %d reply queues\n",
			    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
			/*
			 * If the 31st bit is zero then interrupt coalescing
			 * is enabled for all reply descriptor post queues.
			 * If the 31st bit is set to one then the user can
			 * enable/disable interrupt coalescing on a per reply
			 * descriptor post queue group (8) basis. So to enable
			 * interrupt coalescing only on the first reply
			 * descriptor post queue group, the 31st bit and the
			 * zeroth bit are set.
			 */
			ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
			    ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
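			/*
			 * Worked example, assuming
			 * MPT3SAS_HIGH_IOPS_REPLY_QUEUES == 8: the group
			 * mask is (1 << 1) - 1 == 0x1, so ProductSpecific
			 * becomes 0x80000001 -- bit 31 selects per-group
			 * control and bit 0 enables coalescing for queue
			 * group 0 (reply queues 0-7) only.
			 */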
			mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
			ioc_info(ioc, "performance mode: balanced\n");
			return;
		}
		/* Fall through */
	case MPT_PERF_MODE_LATENCY:
		/*
		 * Enable interrupt coalescing on all reply queues
		 * with timeout value 0xA
		 */
		ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
		ioc_pg1.ProductSpecific = 0;
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
		ioc_info(ioc, "performance mode: latency\n");
		break;
	case MPT_PERF_MODE_IOPS:
		/*
		 * Enable interrupt coalescing on all reply queues.
		 */
		ioc_info(ioc,
		    "performance mode: iops with coalescing timeout: 0x%x\n",
		    le32_to_cpu(ioc_pg1.CoalescingTimeout));
		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
		ioc_pg1.ProductSpecific = 0;
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
		break;
	}
}

/**
 * _base_static_config_pages - static start of day config pages
 * @ioc: per adapter object
 */
static void
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	u32 iounit_pg1_flags;

	ioc->nvme_abort_timeout = 30;
	mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
	if (ioc->ir_firmware)
		mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
		    &ioc->manu_pg10);

	/*
	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
	 * flag unset in NVDATA.
	 */
	mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
	if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
		    ioc->name);
		ioc->manu_pg11.EEDPTagMode &= ~0x3;
		ioc->manu_pg11.EEDPTagMode |= 0x1;
		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
		    &ioc->manu_pg11);
	}
	if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
		ioc->tm_custom_handling = 1;
	else {
		ioc->tm_custom_handling = 0;
		if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
		else if (ioc->manu_pg11.NVMeAbortTO >
		    NVME_TASK_ABORT_MAX_TIMEOUT)
			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
		else
			ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
	}

	mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
	mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
	mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
	mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
	mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
	_base_display_ioc_capabilities(ioc);

	/*
	 * Enable task_set_full handling in iounit_pg1 when the
	 * facts capabilities indicate that it is supported.
	 */
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if ((ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
		iounit_pg1_flags &=
		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	else
		iounit_pg1_flags |=
		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
	mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);

	if (ioc->iounit_pg8.NumSensors)
		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
	if (ioc->is_aero_ioc)
		_base_update_ioc_page1_inlinewith_perf_mode(ioc);
}

/**
 * mpt3sas_free_enclosure_list - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated during enclosure add.
 */
void
mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
{
	struct _enclosure_node *enclosure_dev, *enclosure_dev_next;

	/* Free enclosure list */
	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &ioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}

/**
 * _base_release_memory_pools - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated from _base_allocate_memory_pools.
 */
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	int i = 0;
	int j = 0;
	struct chain_tracker *ct;
	struct reply_post_struct *rps;

	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->request) {
		dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
		    ioc->request, ioc->request_dma);
		dexitprintk(ioc,
		    ioc_info(ioc, "request_pool(0x%p): free\n",
			     ioc->request));
		ioc->request = NULL;
	}

	if (ioc->sense) {
		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
		dma_pool_destroy(ioc->sense_dma_pool);
		dexitprintk(ioc,
		    ioc_info(ioc, "sense_pool(0x%p): free\n",
			     ioc->sense));
		ioc->sense = NULL;
	}

	if (ioc->reply) {
		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
		dma_pool_destroy(ioc->reply_dma_pool);
		dexitprintk(ioc,
		    ioc_info(ioc, "reply_pool(0x%p): free\n",
			     ioc->reply));
		ioc->reply = NULL;
	}

	if (ioc->reply_free) {
		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
		    ioc->reply_free_dma);
		dma_pool_destroy(ioc->reply_free_dma_pool);
		dexitprintk(ioc,
		    ioc_info(ioc, "reply_free_pool(0x%p): free\n",
			     ioc->reply_free));
		ioc->reply_free = NULL;
	}

	if (ioc->reply_post) {
		do {
			rps = &ioc->reply_post[i];
			if (rps->reply_post_free) {
				dma_pool_free(
				    ioc->reply_post_free_dma_pool,
				    rps->reply_post_free,
				    rps->reply_post_free_dma);
				dexitprintk(ioc,
				    ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
					     rps->reply_post_free));
				rps->reply_post_free = NULL;
			}
		} while (ioc->rdpq_array_enable &&
			   (++i < ioc->reply_queue_count));
		if (ioc->reply_post_free_array &&
			ioc->rdpq_array_enable) {
			dma_pool_free(ioc->reply_post_free_array_dma_pool,
			    ioc->reply_post_free_array,
			    ioc->reply_post_free_array_dma);
			ioc->reply_post_free_array = NULL;
		}
		dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
		dma_pool_destroy(ioc->reply_post_free_dma_pool);
		kfree(ioc->reply_post);
	}

	if (ioc->pcie_sgl_dma_pool) {
		for (i = 0; i < ioc->scsiio_depth; i++) {
			dma_pool_free(ioc->pcie_sgl_dma_pool,
			    ioc->pcie_sg_lookup[i].pcie_sgl,
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
		}
		dma_pool_destroy(ioc->pcie_sgl_dma_pool);
	}

	if (ioc->config_page) {
		dexitprintk(ioc,
		    ioc_info(ioc, "config_page(0x%p): free\n",
			     ioc->config_page));
		dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
		    ioc->config_page, ioc->config_page_dma);
	}

	kfree(ioc->hpr_lookup);
	kfree(ioc->internal_lookup);
	if (ioc->chain_lookup) {
		for (i = 0; i < ioc->scsiio_depth; i++) {
			for (j = ioc->chains_per_prp_buffer;
			    j < ioc->chains_needed_per_io; j++) {
				ct = &ioc->chain_lookup[i].chains_per_smid[j];
				if (ct && ct->chain_buffer)
					dma_pool_free(ioc->chain_dma_pool,
					    ct->chain_buffer,
					    ct->chain_buffer_dma);
			}
			kfree(ioc->chain_lookup[i].chains_per_smid);
		}
		dma_pool_destroy(ioc->chain_dma_pool);
		kfree(ioc->chain_lookup);
		ioc->chain_lookup = NULL;
	}
}

/**
 * is_MSB_are_same - checks whether all reply queues in a set have the same
 *	upper 32 bits in their base memory address.
 * @reply_pool_start_address: Base address of a reply queue set
 * @pool_sz: Size of a single Reply Descriptor Post Queue pool
 *
 * Return: 1 if the reply queues in a set have the same upper 32 bits in
 * their base memory address, else 0.
 */
static int
is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
{
	long reply_pool_end_address;

	reply_pool_end_address = reply_pool_start_address + pool_sz;
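	/*
	 * Illustrative check (hypothetical addresses): a pool starting at
	 * 0x1FFFFF000 with pool_sz 0x2000 ends at 0x200001000; the upper
	 * 32 bits differ (0x1 vs 0x2), i.e. the pool straddles a 4 GiB
	 * boundary, so this returns 0.
	 */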

	if (upper_32_bits(reply_pool_start_address) ==
	    upper_32_bits(reply_pool_end_address))
		return 1;
	else
		return 0;
}

/**
 * _base_allocate_memory_pools - allocate start of day memory pools
 * @ioc: per adapter object
 *
 * Return: 0 success, anything else error.
 */
static int
_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	struct mpt3sas_facts *facts;
	u16 max_sge_elements;
	u16 chains_needed_per_io;
	u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
	u32 retry_sz;
	u16 max_request_credit, nvme_blocks_needed;
	unsigned short sg_tablesize;
	u16 sge_size;
	int i, j;
	struct chain_tracker *ct;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	retry_sz = 0;
	facts = &ioc->facts;

	/* command line tunables for max sgl entries */
	if (max_sgl_entries != -1)
		sg_tablesize = max_sgl_entries;
	else {
		if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
			sg_tablesize = MPT2SAS_SG_DEPTH;
		else
			sg_tablesize = MPT3SAS_SG_DEPTH;
	}

	/* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
	if (reset_devices)
		sg_tablesize = min_t(unsigned short, sg_tablesize,
		    MPT_KDUMP_MIN_PHYS_SEGMENTS);

	if (ioc->is_mcpu_endpoint)
		ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
	else {
		if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
			sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
		else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
			sg_tablesize = min_t(unsigned short, sg_tablesize,
			    SG_MAX_SEGMENTS);
			ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
				 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
		}
		ioc->shost->sg_tablesize = sg_tablesize;
	}

	ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
	    (facts->RequestCredit / 4));
	if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
		if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
		    INTERNAL_SCSIIO_CMDS_COUNT)) {
			ioc_err(ioc, "IOC doesn't have enough Request Credits, it has only %d credits\n",
				facts->RequestCredit);
			return -ENOMEM;
		}
		ioc->internal_depth = 10;
	}

	ioc->hi_priority_depth = ioc->internal_depth - (5);
	/* command line tunables for max controller queue depth */
	if (max_queue_depth != -1 && max_queue_depth != 0) {
		max_request_credit = min_t(u16, max_queue_depth +
		    ioc->internal_depth, facts->RequestCredit);
		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
			max_request_credit = MAX_HBA_QUEUE_DEPTH;
	} else if (reset_devices)
		max_request_credit = min_t(u16, facts->RequestCredit,
		    (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
	else
		max_request_credit = min_t(u16, facts->RequestCredit,
		    MAX_HBA_QUEUE_DEPTH);

	/* Firmware maintains additional facts->HighPriorityCredit number of
	 * credits for HiPriority Request messages, so hba queue depth will be
	 * sum of max_request_credit and high priority queue depth.
	 */
	ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
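	/*
	 * Illustrative budget (hypothetical credits): with a firmware
	 * RequestCredit of 1000 and a hi_priority_depth of 5, the HBA
	 * queue depth becomes 1000 + 5 = 1005 message frames.
	 */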

	/* request frame size */
	ioc->request_sz = facts->IOCRequestFrameSize * 4;

	/* reply frame size */
	ioc->reply_sz = facts->ReplyFrameSize * 4;

	/* chain segment size */
	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		if (facts->IOCMaxChainSegmentSize)
			ioc->chain_segment_sz =
			    facts->IOCMaxChainSegmentSize *
			    MAX_CHAIN_ELEMT_SZ;
		else
			/* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
			ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
			    MAX_CHAIN_ELEMT_SZ;
	} else
		ioc->chain_segment_sz = ioc->request_sz;

	/* calculate the max scatter element size */
	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);

retry_allocation:
	total_sz = 0;
	/* calculate number of sg elements left over in the 1st frame */
	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
	ioc->max_sges_in_main_message = max_sge_elements/sge_size;

	/* now do the same for a chain buffer */
	max_sge_elements = ioc->chain_segment_sz - sge_size;
	ioc->max_sges_in_chain_message = max_sge_elements/sge_size;

	/*
	 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
	 */
	chains_needed_per_io = ((ioc->shost->sg_tablesize -
	    ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
	    + 1;
	if (chains_needed_per_io > facts->MaxChainDepth) {
		chains_needed_per_io = facts->MaxChainDepth;
		ioc->shost->sg_tablesize = min_t(u16,
		    ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
		    * chains_needed_per_io), ioc->shost->sg_tablesize);
	}
	ioc->chains_needed_per_io = chains_needed_per_io;

	/* reply free queue sizing - taking into account for 64 FW events */
	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;

	/* mCPU manages single counters for simplicity */
	if (ioc->is_mcpu_endpoint)
		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
	else {
		/* calculate reply descriptor post queue depth */
		ioc->reply_post_queue_depth = ioc->hba_queue_depth +
		    ioc->reply_free_queue_depth + 1;
		/* align the reply post queue on the next 16 count boundary */
		if (ioc->reply_post_queue_depth % 16)
			ioc->reply_post_queue_depth += 16 -
			    (ioc->reply_post_queue_depth % 16);
	}
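	/*
	 * Illustrative rounding (hypothetical depths): hba_queue_depth 600
	 * and reply_free_queue_depth 664 give 600 + 664 + 1 = 1265 entries;
	 * 1265 % 16 == 1, so 15 more are added for a 16-aligned depth of
	 * 1280.
	 */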

	if (ioc->reply_post_queue_depth >
	    facts->MaxReplyDescriptorPostQueueDepth) {
		ioc->reply_post_queue_depth =
		    facts->MaxReplyDescriptorPostQueueDepth -
		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
		ioc->hba_queue_depth =
		    ((ioc->reply_post_queue_depth - 64) / 2) - 1;
		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
	}

	ioc_info(ioc,
	    "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
	    "sge_per_io(%d), chains_per_io(%d)\n",
	    ioc->max_sges_in_main_message,
	    ioc->max_sges_in_chain_message,
	    ioc->shost->sg_tablesize,
	    ioc->chains_needed_per_io);

	/* reply post queue, 16 byte align */
	reply_post_free_sz = ioc->reply_post_queue_depth *
	    sizeof(Mpi2DefaultReplyDescriptor_t);

	sz = reply_post_free_sz;
	if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
		sz *= ioc->reply_queue_count;

	ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
	    (ioc->reply_queue_count):1,
	    sizeof(struct reply_post_struct), GFP_KERNEL);

	if (!ioc->reply_post) {
		ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
		goto out;
	}
	ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
	    &ioc->pdev->dev, sz, 16, 0);
	if (!ioc->reply_post_free_dma_pool) {
		ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
		goto out;
	}
	i = 0;
	do {
		ioc->reply_post[i].reply_post_free =
		    dma_pool_zalloc(ioc->reply_post_free_dma_pool,
		    GFP_KERNEL,
		    &ioc->reply_post[i].reply_post_free_dma);
		if (!ioc->reply_post[i].reply_post_free) {
			ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
			goto out;
		}
		dinitprintk(ioc,
		    ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
			     ioc->reply_post[i].reply_post_free,
			     ioc->reply_post_queue_depth,
			     8, sz / 1024));
		dinitprintk(ioc,
		    ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
			     (u64)ioc->reply_post[i].reply_post_free_dma));
		total_sz += sz;
	} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));

	if (ioc->dma_mask > 32) {
		if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
			ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
				 pci_name(ioc->pdev));
			goto out;
		}
	}

	ioc->scsiio_depth = ioc->hba_queue_depth -
	    ioc->hi_priority_depth - ioc->internal_depth;

	/* set the scsi host can_queue depth
	 * with some internal commands that could be outstanding
	 */
	ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
	dinitprintk(ioc,
	    ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
		     ioc->shost->can_queue));

	/* contiguous pool for request and chains, 16 byte align, one extra
	 * frame for smid=0
	 */
	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);

	/* hi-priority queue */
	sz += (ioc->hi_priority_depth * ioc->request_sz);

	/* internal queue */
	sz += (ioc->internal_depth * ioc->request_sz);

	ioc->request_dma_sz = sz;
	ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
	    &ioc->request_dma, GFP_KERNEL);
	if (!ioc->request) {
		ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
			ioc->hba_queue_depth, ioc->chains_needed_per_io,
			ioc->request_sz, sz / 1024);
		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
			goto out;
		retry_sz = 64;
		ioc->hba_queue_depth -= retry_sz;
		_base_release_memory_pools(ioc);
		goto retry_allocation;
	}
	memset(ioc->request, 0, sz);

	if (retry_sz)
		ioc_err(ioc, "request pool: dma_alloc_coherent succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
			ioc->hba_queue_depth, ioc->chains_needed_per_io,
			ioc->request_sz, sz / 1024);

	/* hi-priority queue */
	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
	    ioc->request_sz);
	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
	    ioc->request_sz);

	/* internal queue */
	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
	    ioc->request_sz);
	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
	    ioc->request_sz);

	ioc_info(ioc,
	    "request pool(0x%p) - dma(0x%llx): "
	    "depth(%d), frame_size(%d), pool_size(%d kB)\n",
	    ioc->request, (unsigned long long) ioc->request_dma,
	    ioc->hba_queue_depth, ioc->request_sz,
	    (ioc->hba_queue_depth * ioc->request_sz) / 1024);

	total_sz += sz;

	dinitprintk(ioc,
	    ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
		     ioc->request, ioc->scsiio_depth));

	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
	sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
	if (!ioc->chain_lookup) {
		ioc_err(ioc, "chain_lookup: kzalloc failed\n");
		goto out;
	}

	sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
	for (i = 0; i < ioc->scsiio_depth; i++) {
		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
		if (!ioc->chain_lookup[i].chains_per_smid) {
			ioc_err(ioc, "chain_lookup: kzalloc failed\n");
			goto out;
		}
	}

	/* initialize hi-priority queue smid's */
	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->hpr_lookup) {
		ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
		goto out;
	}
	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
	dinitprintk(ioc,
	    ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
		     ioc->hi_priority,
		     ioc->hi_priority_depth, ioc->hi_priority_smid));

	/* initialize internal queue smid's */
	ioc->internal_lookup = kcalloc(ioc->internal_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->internal_lookup) {
		ioc_err(ioc, "internal_lookup: kcalloc failed\n");
		goto out;
	}
	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
	dinitprintk(ioc,
	    ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
		     ioc->internal,
		     ioc->internal_depth, ioc->internal_smid));
	/*
	 * The number of NVMe page sized blocks needed is:
	 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
	 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
	 * that is placed in the main message frame.  8 is the size of each
	 * PRP entry or PRP list pointer entry.  8 is subtracted from
	 * page_size because of the PRP list pointer entry at the end of a
	 * page, so this is not counted as a PRP entry.  The 1 added page is
	 * a round up.
	 *
	 * To avoid allocation failures due to the amount of memory that could
	 * be required for NVMe PRP's, only each set of NVMe blocks will be
	 * contiguous, so a new set is allocated for each possible I/O.
	 */
	ioc->chains_per_prp_buffer = 0;
	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
		nvme_blocks_needed =
		    (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
		nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
		nvme_blocks_needed++;
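		/*
		 * Illustrative sizing, assuming sg_tablesize 128 and a 4 KiB
		 * IOC page size: (128 * 8) - 1 = 1023 PRP bytes needed;
		 * 1023 / (4096 - 8) = 0, plus the round-up page, gives one
		 * 4 KiB block per I/O.
		 */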

		sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
		ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
		if (!ioc->pcie_sg_lookup) {
			ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
			goto out;
		}
		sz = nvme_blocks_needed * ioc->page_size;
		ioc->pcie_sgl_dma_pool =
		    dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
		if (!ioc->pcie_sgl_dma_pool) {
			ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
			goto out;
		}

		ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
		ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
		    ioc->chains_needed_per_io);
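		/*
		 * Illustrative reuse, assuming a 4 KiB PRP buffer and the
		 * default 128 byte chain segment: 4096 / 128 = 32 chain
		 * segments fit in each PRP buffer, capped at
		 * chains_needed_per_io, so most or all chain buffers for a
		 * SCSI I/O can be carved out of its PRP allocation.
		 */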

		for (i = 0; i < ioc->scsiio_depth; i++) {
			ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
			    ioc->pcie_sgl_dma_pool, GFP_KERNEL,
			    &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
			if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
				ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
				goto out;
			}
			for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
				ct = &ioc->chain_lookup[i].chains_per_smid[j];
				ct->chain_buffer =
				    ioc->pcie_sg_lookup[i].pcie_sgl +
				    (j * ioc->chain_segment_sz);
				ct->chain_buffer_dma =
				    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
				    (j * ioc->chain_segment_sz);
			}
		}

		dinitprintk(ioc,
		    ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
			     ioc->scsiio_depth, sz,
			     (sz * ioc->scsiio_depth) / 1024));
		dinitprintk(ioc,
		    ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
			     ioc->chains_per_prp_buffer));
		total_sz += sz * ioc->scsiio_depth;
	}

	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
	    ioc->chain_segment_sz, 16, 0);
	if (!ioc->chain_dma_pool) {
		ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
		goto out;
	}
	for (i = 0; i < ioc->scsiio_depth; i++) {
		for (j = ioc->chains_per_prp_buffer;
		    j < ioc->chains_needed_per_io; j++) {
			ct = &ioc->chain_lookup[i].chains_per_smid[j];
			ct->chain_buffer = dma_pool_alloc(
			    ioc->chain_dma_pool, GFP_KERNEL,
			    &ct->chain_buffer_dma);
			if (!ct->chain_buffer) {
				ioc_err(ioc, "chain_lookup: dma_pool_alloc failed\n");
				goto out;
			}
		}
		total_sz += ioc->chain_segment_sz;
	}

	dinitprintk(ioc,
	    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
		     ioc->chain_depth, ioc->chain_segment_sz,
		     (ioc->chain_depth * ioc->chain_segment_sz) / 1024));

	/* sense buffers, 4 byte align */
	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
	ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
	    4, 0);
	if (!ioc->sense_dma_pool) {
		ioc_err(ioc, "sense pool: dma_pool_create failed\n");
		goto out;
	}
	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
	    &ioc->sense_dma);
	if (!ioc->sense) {
		ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
		goto out;
	}
	/* The sense buffers are required to be in the same 4 GB region;
	 * the check below verifies this.
	 * In case of failure, a new pool is created with updated alignment,
	 * and the older allocation and pool are destroyed.
	 * The alignment is chosen such that the next allocation, if
	 * successful, will always meet the same 4 GB region requirement.
	 * The actual requirement is not alignment, but that the start and
	 * end DMA addresses have the same upper 32 bits.
	 */
	if (!is_MSB_are_same((long)ioc->sense, sz)) {
		/* release the sense pool & reallocate */
		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
		dma_pool_destroy(ioc->sense_dma_pool);
		ioc->sense = NULL;

		ioc->sense_dma_pool =
		    dma_pool_create("sense pool", &ioc->pdev->dev, sz,
		    roundup_pow_of_two(sz), 0);
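		/*
		 * Aligning to roundup_pow_of_two(sz) guarantees the buffer
		 * cannot straddle a 4 GiB boundary: e.g. (hypothetically)
		 * sz = 288000 bytes rounds up to a 512 KiB alignment, and a
		 * 512 KiB aligned region of that size has identical upper
		 * 32 address bits at its start and end.
		 */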
		if (!ioc->sense_dma_pool) {
			ioc_err(ioc, "sense pool: dma_pool_create failed\n");
			goto out;
		}
		ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
		    &ioc->sense_dma);
		if (!ioc->sense) {
			ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
			goto out;
		}
	}
	ioc_info(ioc,
	    "sense pool(0x%p) - dma(0x%llx): depth(%d), "
	    "element_size(%d), pool_size(%d kB)\n",
	    ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
	    SCSI_SENSE_BUFFERSIZE, sz / 1024);

	total_sz += sz;

	/* reply pool, 4 byte align */
	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
	ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
	    4, 0);
	if (!ioc->reply_dma_pool) {
		ioc_err(ioc, "reply pool: dma_pool_create failed\n");
		goto out;
	}
	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
	    &ioc->reply_dma);
	if (!ioc->reply) {
		ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
		goto out;
	}
	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
	dinitprintk(ioc,
	    ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
		     ioc->reply, ioc->reply_free_queue_depth,
		     ioc->reply_sz, sz / 1024));
	dinitprintk(ioc,
	    ioc_info(ioc, "reply_dma(0x%llx)\n",
		     (unsigned long long)ioc->reply_dma));
	total_sz += sz;

	/* reply free queue, 16 byte align */
	sz = ioc->reply_free_queue_depth * 4;
	ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
	    &ioc->pdev->dev, sz, 16, 0);
	if (!ioc->reply_free_dma_pool) {
		ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
		goto out;
	}
	ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
	    &ioc->reply_free_dma);
	if (!ioc->reply_free) {
		ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
		goto out;
	}
	dinitprintk(ioc,
	    ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
		     ioc->reply_free, ioc->reply_free_queue_depth,
		     4, sz / 1024));
	dinitprintk(ioc,
	    ioc_info(ioc, "reply_free_dma (0x%llx)\n",
		     (unsigned long long)ioc->reply_free_dma));
	total_sz += sz;

	if (ioc->rdpq_array_enable) {
		reply_post_free_array_sz = ioc->reply_queue_count *
		    sizeof(Mpi2IOCInitRDPQArrayEntry);
		ioc->reply_post_free_array_dma_pool =
		    dma_pool_create("reply_post_free_array pool",
		    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
		if (!ioc->reply_post_free_array_dma_pool) {
			dinitprintk(ioc,
			    ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
			goto out;
		}
		ioc->reply_post_free_array =
		    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
		    GFP_KERNEL, &ioc->reply_post_free_array_dma);
		if (!ioc->reply_post_free_array) {
			dinitprintk(ioc,
			    ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
			goto out;
		}
	}
	ioc->config_page_sz = 512;
	ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
	    ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
	if (!ioc->config_page) {
		ioc_err(ioc, "config page: dma_alloc_coherent failed\n");
		goto out;
	}

	ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
	    ioc->config_page, (unsigned long long)ioc->config_page_dma,
	    ioc->config_page_sz);
	total_sz += ioc->config_page_sz;

	ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
		 total_sz / 1024);
	ioc_info(ioc, "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
		 ioc->shost->can_queue, facts->RequestCredit);
	ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
		 ioc->shost->sg_tablesize);
	return 0;

out:
	return -ENOMEM;
}

/**
 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
 * @ioc: Pointer to MPT_ADAPTER structure
 * @cooked: Request raw or cooked IOC state
 *
 * Return: all IOC Doorbell register bits if cooked==0, else just the
 * Doorbell bits in MPI_IOC_STATE_MASK.
 */
u32
mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
{
	u32 s, sc;

	s = ioc->base_readl(&ioc->chip->Doorbell);
	sc = s & MPI2_IOC_STATE_MASK;
	return cooked ? sc : s;
}

/**
 * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc: per adapter object
 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
{
	u32 count, cntdn;
	u32 current_state;

	count = 0;
	cntdn = 1000 * timeout;
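	/*
	 * Each loop iteration sleeps roughly 1 ms (usleep_range below), so
	 * cntdn counts approximately one poll per millisecond; e.g. a
	 * 10 second timeout allows up to 10000 polls of the IOC state.
	 */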
	do {
		current_state = mpt3sas_base_get_iocstate(ioc, 1);
		if (current_state == ioc_state)
			return 0;
		if (count && current_state == MPI2_IOC_STATE_FAULT)
			break;
		if (count && current_state == MPI2_IOC_STATE_COREDUMP)
			break;

		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);

	return current_state;
}

/**
 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated
 * by a write to the doorbell)
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
 */
static int
_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 int_status;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			dhsprintk(ioc,
			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
				     __func__, count, timeout));
			return 0;
		}

		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);

	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
		__func__, count, int_status);
	return -EFAULT;
}

static int
_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 int_status;

	count = 0;
	cntdn = 2000 * timeout;
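	/*
	 * Unlike _base_wait_for_doorbell_int() this variant busy-waits with
	 * udelay(500), i.e. it polls about 2000 times per second, which is
	 * why cntdn is 2000 * timeout here rather than 1000 * timeout.
	 */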
	do {
		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			dhsprintk(ioc,
			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
				     __func__, count, timeout));
			return 0;
		}

		udelay(500);
		count++;
	} while (--cntdn);

	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
		__func__, count, int_status);
	return -EFAULT;
}

/**
 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
 * doorbell.
 */
static int
_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
			dhsprintk(ioc,
			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
				     __func__, count, timeout));
			return 0;
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			doorbell = ioc->base_readl(&ioc->chip->Doorbell);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_print_fault_code(ioc, doorbell);
				return -EFAULT;
			}
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_COREDUMP) {
				mpt3sas_print_coredump_info(ioc, doorbell);
				return -EFAULT;
			}
		} else if (int_status == 0xFFFFFFFF)
			goto out;

		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);

out:
	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
		__func__, count, int_status);
	return -EFAULT;
}

/**
 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 doorbell_reg;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
			dhsprintk(ioc,
			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
				     __func__, count, timeout));
			return 0;
		}

		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);

	ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
		__func__, count, doorbell_reg);
	return -EFAULT;
}

/**
 * _base_send_ioc_reset - send doorbell reset
 * @ioc: per adapter object
 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
{
	u32 ioc_state;
	int r = 0;
	unsigned long flags;

	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
		ioc_err(ioc, "%s: unknown reset_type\n", __func__);
		return -EFAULT;
	}

	if (!(ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		return -EFAULT;

	ioc_info(ioc, "sending message unit reset !!\n");

	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
	    &ioc->chip->Doorbell);
	if ((_base_wait_for_doorbell_ack(ioc, 15))) {
		r = -EFAULT;
		goto out;
	}

	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
	if (ioc_state) {
		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
			__func__, ioc_state);
		r = -EFAULT;
		goto out;
	}
out:
	if (r != 0) {
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
		/*
		 * Wait for IOC state CoreDump to clear only during
		 * HBA initialization & release time.
		 */
		if ((ioc_state & MPI2_IOC_STATE_MASK) ==
		    MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
		    ioc->fault_reset_work_q == NULL)) {
			spin_unlock_irqrestore(
			    &ioc->ioc_reset_in_progress_lock, flags);
			mpt3sas_print_coredump_info(ioc, ioc_state);
			mpt3sas_base_wait_for_coredump_completion(ioc,
			    __func__);
			spin_lock_irqsave(
			    &ioc->ioc_reset_in_progress_lock, flags);
		}
		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	}
	ioc_info(ioc, "message unit reset: %s\n",
		 r == 0 ? "SUCCESS" : "FAILED");
	return r;
}
|
|
|
|
|
|
2018-10-31 21:23:33 +08:00
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_wait_for_ioc - IOC's operational state is checked here.
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @wait_count: timeout in seconds
|
|
|
|
|
*
|
|
|
|
|
* Return: Waits up to timeout seconds for the IOC to
|
|
|
|
|
* become operational. Returns 0 if IOC is present
|
|
|
|
|
* and operational; otherwise returns -EFAULT.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
|
mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
|
|
|
|
|
{
|
|
|
|
|
int wait_state_count = 0;
|
|
|
|
|
u32 ioc_state;
|
|
|
|
|
|
2018-10-31 21:23:34 +08:00
|
|
|
|
do {
|
2018-10-31 21:23:33 +08:00
|
|
|
|
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
|
2018-10-31 21:23:34 +08:00
|
|
|
|
if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
|
|
|
|
|
break;
|
|
|
|
|
ssleep(1);
|
2018-10-31 21:23:33 +08:00
|
|
|
|
ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
|
2018-10-31 21:23:34 +08:00
|
|
|
|
__func__, ++wait_state_count);
|
|
|
|
|
} while (--timeout);
|
|
|
|
|
if (!timeout) {
|
|
|
|
|
ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
|
|
|
|
|
return -EFAULT;
|
2018-10-31 21:23:33 +08:00
|
|
|
|
}
|
|
|
|
|
if (wait_state_count)
|
|
|
|
|
ioc_info(ioc, "ioc is operational\n");
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_handshake_req_reply_wait - send request thru doorbell interface
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @request_bytes: request length
|
|
|
|
|
* @request: pointer having request payload
|
|
|
|
|
* @reply_bytes: reply length
|
|
|
|
|
* @reply: pointer to reply payload
|
|
|
|
|
* @timeout: timeout in second
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
static int
|
|
|
|
|
_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
|
2016-07-29 12:38:21 +08:00
|
|
|
|
u32 *request, int reply_bytes, u16 *reply, int timeout)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
{
|
|
|
|
|
MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
|
|
|
|
|
int i;
|
|
|
|
|
u8 failed;
|
|
|
|
|
__le32 *mfp;
|
|
|
|
|
|
|
|
|
|
/* make sure doorbell is not in use */
|
2018-12-07 15:28:34 +08:00
|
|
|
|
if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EFAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* clear pending doorbell interrupts from previous state changes */
|
2018-12-07 15:28:34 +08:00
|
|
|
|
if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
|
2012-11-30 10:14:21 +08:00
|
|
|
|
MPI2_HIS_IOC2SYS_DB_STATUS)
|
|
|
|
|
writel(0, &ioc->chip->HostInterruptStatus);
|
|
|
|
|
|
|
|
|
|
/* send message to ioc */
|
|
|
|
|
writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
|
|
|
|
|
((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
|
|
|
|
|
&ioc->chip->Doorbell);
|
|
|
|
|
|
2016-07-29 12:38:21 +08:00
|
|
|
|
if ((_base_spin_on_doorbell_int(ioc, 5))) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
|
|
|
|
|
__LINE__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EFAULT;
|
|
|
|
|
}
|
|
|
|
|
writel(0, &ioc->chip->HostInterruptStatus);
|
|
|
|
|
|
2016-07-29 12:38:21 +08:00
|
|
|
|
if ((_base_wait_for_doorbell_ack(ioc, 5))) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
|
|
|
|
|
__LINE__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EFAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* send message 32-bits at a time */
|
|
|
|
|
for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
|
2018-07-31 13:36:36 +08:00
|
|
|
|
writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
|
2016-07-29 12:38:21 +08:00
|
|
|
|
if ((_base_wait_for_doorbell_ack(ioc, 5)))
|
2012-11-30 10:14:21 +08:00
|
|
|
|
failed = 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (failed) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
|
|
|
|
|
__LINE__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EFAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* now wait for the reply */
|
2016-07-29 12:38:21 +08:00
|
|
|
|
if ((_base_wait_for_doorbell_int(ioc, timeout))) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
|
|
|
|
|
__LINE__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EFAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* read the first two 16-bits, it gives the total length of the reply */
|
2018-12-07 15:28:34 +08:00
|
|
|
|
reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
& MPI2_DOORBELL_DATA_MASK);
|
|
|
|
|
writel(0, &ioc->chip->HostInterruptStatus);
|
2016-07-29 12:38:21 +08:00
|
|
|
|
if ((_base_wait_for_doorbell_int(ioc, 5))) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
|
|
|
|
|
__LINE__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EFAULT;
|
|
|
|
|
}
|
2018-12-07 15:28:34 +08:00
|
|
|
|
reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
& MPI2_DOORBELL_DATA_MASK);
|
|
|
|
|
writel(0, &ioc->chip->HostInterruptStatus);
|
|
|
|
|
|
|
|
|
|
for (i = 2; i < default_reply->MsgLength * 2; i++) {
|
2016-07-29 12:38:21 +08:00
|
|
|
|
if ((_base_wait_for_doorbell_int(ioc, 5))) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
|
|
|
|
|
__LINE__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EFAULT;
|
|
|
|
|
}
|
|
|
|
|
if (i >= reply_bytes/2) /* overflow case */
|
2018-12-07 15:28:34 +08:00
|
|
|
|
ioc->base_readl(&ioc->chip->Doorbell);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
else
|
2018-12-07 15:28:34 +08:00
|
|
|
|
reply[i] = le16_to_cpu(
|
|
|
|
|
ioc->base_readl(&ioc->chip->Doorbell)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
& MPI2_DOORBELL_DATA_MASK);
|
|
|
|
|
writel(0, &ioc->chip->HostInterruptStatus);
|
|
|
|
|
}
|
|
|
|
|
|
2016-07-29 12:38:21 +08:00
|
|
|
|
_base_wait_for_doorbell_int(ioc, 5);
|
|
|
|
|
if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dhsprintk(ioc,
|
|
|
|
|
ioc_info(ioc, "doorbell is in use (line=%d)\n",
|
|
|
|
|
__LINE__));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
}
|
|
|
|
|
writel(0, &ioc->chip->HostInterruptStatus);
|
|
|
|
|
|
|
|
|
|
if (ioc->logging_level & MPT_DEBUG_INIT) {
|
|
|
|
|
mfp = (__le32 *)reply;
|
|
|
|
|
pr_info("\toffset:data\n");
|
|
|
|
|
for (i = 0; i < reply_bytes/4; i++)
|
2019-12-26 19:13:30 +08:00
|
|
|
|
ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
|
2012-11-30 10:14:21 +08:00
|
|
|
|
le32_to_cpu(mfp[i]));
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_sas_iounit_control - send sas iounit control to FW
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @mpi_reply: the reply payload from FW
|
|
|
|
|
* @mpi_request: the request payload sent to FW
|
|
|
|
|
*
|
|
|
|
|
* The SAS IO Unit Control Request message allows the host to perform low-level
|
|
|
|
|
* operations, such as resets on the PHYs of the IO Unit, also allows the host
|
|
|
|
|
* to obtain the IOC assigned device handles for a device if it has other
|
|
|
|
|
* identifying information about the device, in addition allows the host to
|
|
|
|
|
* remove IOC resources associated with the device.
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
int
|
|
|
|
|
mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
|
|
|
|
|
Mpi2SasIoUnitControlReply_t *mpi_reply,
|
|
|
|
|
Mpi2SasIoUnitControlRequest_t *mpi_request)
|
|
|
|
|
{
|
|
|
|
|
u16 smid;
|
2018-05-31 18:34:50 +08:00
|
|
|
|
u8 issue_reset = 0;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
int rc;
|
|
|
|
|
void *request;
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
mutex_lock(&ioc->base_cmds.mutex);
|
|
|
|
|
|
|
|
|
|
if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: base_cmd in use\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
rc = -EAGAIN;
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
2018-10-31 21:23:33 +08:00
|
|
|
|
rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
|
|
|
|
|
if (rc)
|
|
|
|
|
goto out;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
|
|
|
|
|
if (!smid) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
rc = -EAGAIN;
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
rc = 0;
|
|
|
|
|
ioc->base_cmds.status = MPT3_CMD_PENDING;
|
|
|
|
|
request = mpt3sas_base_get_msg_frame(ioc, smid);
|
|
|
|
|
ioc->base_cmds.smid = smid;
|
|
|
|
|
memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
|
|
|
|
|
if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
|
|
|
|
|
mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
|
|
|
|
|
ioc->ioc_link_reset_in_progress = 1;
|
|
|
|
|
init_completion(&ioc->base_cmds.done);
|
2019-05-31 20:14:34 +08:00
|
|
|
|
ioc->put_smid_default(ioc, smid);
|
2016-07-29 12:38:22 +08:00
|
|
|
|
wait_for_completion_timeout(&ioc->base_cmds.done,
|
2012-11-30 10:14:21 +08:00
|
|
|
|
msecs_to_jiffies(10000));
|
|
|
|
|
if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
|
|
|
|
|
mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
|
|
|
|
|
ioc->ioc_link_reset_in_progress)
|
|
|
|
|
ioc->ioc_link_reset_in_progress = 0;
|
|
|
|
|
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
|
2019-12-26 19:13:31 +08:00
|
|
|
|
mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
|
|
|
|
|
mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
|
|
|
|
|
issue_reset);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
goto issue_host_reset;
|
|
|
|
|
}
|
|
|
|
|
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
|
|
|
|
|
memcpy(mpi_reply, ioc->base_cmds.reply,
|
|
|
|
|
sizeof(Mpi2SasIoUnitControlReply_t));
|
|
|
|
|
else
|
|
|
|
|
memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
|
|
|
|
|
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
|
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
|
|
issue_host_reset:
|
|
|
|
|
if (issue_reset)
|
2016-07-29 12:38:21 +08:00
|
|
|
|
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
|
|
|
|
|
rc = -EFAULT;
|
|
|
|
|
out:
|
|
|
|
|
mutex_unlock(&ioc->base_cmds.mutex);
|
|
|
|
|
return rc;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_scsi_enclosure_processor - sending request to sep device
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @mpi_reply: the reply payload from FW
|
|
|
|
|
* @mpi_request: the request payload sent to FW
|
|
|
|
|
*
|
|
|
|
|
* The SCSI Enclosure Processor request message causes the IOC to
|
|
|
|
|
* communicate with SES devices to control LED status signals.
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
int
|
|
|
|
|
mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
|
|
|
|
|
Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
|
|
|
|
|
{
|
|
|
|
|
u16 smid;
|
2018-05-31 18:34:50 +08:00
|
|
|
|
u8 issue_reset = 0;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
int rc;
|
|
|
|
|
void *request;
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
mutex_lock(&ioc->base_cmds.mutex);
|
|
|
|
|
|
|
|
|
|
if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: base_cmd in use\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
rc = -EAGAIN;
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
2018-10-31 21:23:33 +08:00
|
|
|
|
rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
|
|
|
|
|
if (rc)
|
|
|
|
|
goto out;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
|
|
|
|
|
if (!smid) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
rc = -EAGAIN;
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
rc = 0;
|
|
|
|
|
ioc->base_cmds.status = MPT3_CMD_PENDING;
|
|
|
|
|
request = mpt3sas_base_get_msg_frame(ioc, smid);
|
|
|
|
|
ioc->base_cmds.smid = smid;
|
2019-08-03 21:59:47 +08:00
|
|
|
|
memset(request, 0, ioc->request_sz);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
|
|
|
|
|
init_completion(&ioc->base_cmds.done);
|
2019-05-31 20:14:34 +08:00
|
|
|
|
ioc->put_smid_default(ioc, smid);
|
2016-07-29 12:38:22 +08:00
|
|
|
|
wait_for_completion_timeout(&ioc->base_cmds.done,
|
2012-11-30 10:14:21 +08:00
|
|
|
|
msecs_to_jiffies(10000));
|
|
|
|
|
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
|
2019-12-26 19:13:31 +08:00
|
|
|
|
mpt3sas_check_cmd_timeout(ioc,
|
|
|
|
|
ioc->base_cmds.status, mpi_request,
|
|
|
|
|
sizeof(Mpi2SepRequest_t)/4, issue_reset);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
goto issue_host_reset;
|
|
|
|
|
}
|
|
|
|
|
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
|
|
|
|
|
memcpy(mpi_reply, ioc->base_cmds.reply,
|
|
|
|
|
sizeof(Mpi2SepReply_t));
|
|
|
|
|
else
|
|
|
|
|
memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
|
|
|
|
|
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
|
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
|
|
issue_host_reset:
|
|
|
|
|
if (issue_reset)
|
2016-07-29 12:38:21 +08:00
|
|
|
|
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
|
|
|
|
|
rc = -EFAULT;
|
|
|
|
|
out:
|
|
|
|
|
mutex_unlock(&ioc->base_cmds.mutex);
|
|
|
|
|
return rc;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* _base_get_port_facts - obtain port facts reply and save in ioc
|
|
|
|
|
* @ioc: per adapter object
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* @port: ?
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
static int
|
2016-07-29 12:38:21 +08:00
|
|
|
|
_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
{
|
|
|
|
|
Mpi2PortFactsRequest_t mpi_request;
|
|
|
|
|
Mpi2PortFactsReply_t mpi_reply;
|
|
|
|
|
struct mpt3sas_port_facts *pfacts;
|
|
|
|
|
int mpi_reply_sz, mpi_request_sz, r;
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
|
|
|
|
|
mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
|
|
|
|
|
memset(&mpi_request, 0, mpi_request_sz);
|
|
|
|
|
mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
|
|
|
|
|
mpi_request.PortNumber = port;
|
|
|
|
|
r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
|
2016-07-29 12:38:21 +08:00
|
|
|
|
(u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
if (r != 0) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return r;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pfacts = &ioc->pfacts[port];
|
|
|
|
|
memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
|
|
|
|
|
pfacts->PortNumber = mpi_reply.PortNumber;
|
|
|
|
|
pfacts->VP_ID = mpi_reply.VP_ID;
|
|
|
|
|
pfacts->VF_ID = mpi_reply.VF_ID;
|
|
|
|
|
pfacts->MaxPostedCmdBuffers =
|
|
|
|
|
le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-30 14:54:48 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @timeout:
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2015-06-30 14:54:48 +08:00
|
|
|
|
*/
|
|
|
|
|
static int
|
2016-07-29 12:38:21 +08:00
|
|
|
|
_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
|
2015-06-30 14:54:48 +08:00
|
|
|
|
{
|
|
|
|
|
u32 ioc_state;
|
|
|
|
|
int rc;
|
|
|
|
|
|
2018-09-17 23:01:13 +08:00
|
|
|
|
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
|
2015-06-30 14:54:48 +08:00
|
|
|
|
|
|
|
|
|
if (ioc->pci_error_recovery) {
|
2018-09-17 23:01:13 +08:00
|
|
|
|
dfailprintk(ioc,
|
|
|
|
|
ioc_info(ioc, "%s: host in pci error recovery\n",
|
|
|
|
|
__func__));
|
2015-06-30 14:54:48 +08:00
|
|
|
|
return -EFAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
|
2018-09-17 23:01:13 +08:00
|
|
|
|
dhsprintk(ioc,
|
|
|
|
|
ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
|
|
|
|
|
__func__, ioc_state));
|
2015-06-30 14:54:48 +08:00
|
|
|
|
|
|
|
|
|
if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
|
|
|
|
|
(ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
if (ioc_state & MPI2_DOORBELL_USED) {
|
2018-09-17 23:01:13 +08:00
|
|
|
|
dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
|
2015-06-30 14:54:48 +08:00
|
|
|
|
goto issue_diag_reset;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
|
2019-12-26 19:13:29 +08:00
|
|
|
|
mpt3sas_print_fault_code(ioc, ioc_state &
|
2015-06-30 14:54:48 +08:00
|
|
|
|
MPI2_DOORBELL_DATA_MASK);
|
|
|
|
|
goto issue_diag_reset;
|
2019-12-26 19:13:27 +08:00
|
|
|
|
} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
|
|
|
|
|
MPI2_IOC_STATE_COREDUMP) {
|
|
|
|
|
ioc_info(ioc,
|
|
|
|
|
"%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
|
|
|
|
|
__func__, ioc_state);
|
|
|
|
|
return -EFAULT;
|
2015-06-30 14:54:48 +08:00
|
|
|
|
}
|
|
|
|
|
|
2016-07-29 12:38:21 +08:00
|
|
|
|
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
|
2015-06-30 14:54:48 +08:00
|
|
|
|
if (ioc_state) {
|
2018-09-17 23:01:13 +08:00
|
|
|
|
dfailprintk(ioc,
|
|
|
|
|
ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
|
|
|
|
|
__func__, ioc_state));
|
2015-06-30 14:54:48 +08:00
|
|
|
|
return -EFAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
issue_diag_reset:
|
2016-07-29 12:38:21 +08:00
|
|
|
|
rc = _base_diag_reset(ioc);
|
2015-06-30 14:54:48 +08:00
|
|
|
|
return rc;
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
/**
|
|
|
|
|
* _base_get_ioc_facts - obtain ioc facts reply and save in ioc
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
static int
|
2016-07-29 12:38:21 +08:00
|
|
|
|
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
{
|
|
|
|
|
Mpi2IOCFactsRequest_t mpi_request;
|
|
|
|
|
Mpi2IOCFactsReply_t mpi_reply;
|
|
|
|
|
struct mpt3sas_facts *facts;
|
|
|
|
|
int mpi_reply_sz, mpi_request_sz, r;
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2016-07-29 12:38:21 +08:00
|
|
|
|
r = _base_wait_for_iocstate(ioc, 10);
|
2015-06-30 14:54:48 +08:00
|
|
|
|
if (r) {
|
2018-09-17 23:01:13 +08:00
|
|
|
|
dfailprintk(ioc,
|
|
|
|
|
ioc_info(ioc, "%s: failed getting to correct state\n",
|
|
|
|
|
__func__));
|
2015-06-30 14:54:48 +08:00
|
|
|
|
return r;
|
|
|
|
|
}
|
2012-11-30 10:14:21 +08:00
|
|
|
|
mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
|
|
|
|
|
mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
|
|
|
|
|
memset(&mpi_request, 0, mpi_request_sz);
|
|
|
|
|
mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
|
|
|
|
|
r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
|
2016-07-29 12:38:21 +08:00
|
|
|
|
(u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
if (r != 0) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return r;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
facts = &ioc->facts;
|
|
|
|
|
memset(facts, 0, sizeof(struct mpt3sas_facts));
|
|
|
|
|
facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
|
|
|
|
|
facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
|
|
|
|
|
facts->VP_ID = mpi_reply.VP_ID;
|
|
|
|
|
facts->VF_ID = mpi_reply.VF_ID;
|
|
|
|
|
facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
|
|
|
|
|
facts->MaxChainDepth = mpi_reply.MaxChainDepth;
|
|
|
|
|
facts->WhoInit = mpi_reply.WhoInit;
|
|
|
|
|
facts->NumberOfPorts = mpi_reply.NumberOfPorts;
|
|
|
|
|
facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
|
2018-05-31 18:34:51 +08:00
|
|
|
|
if (ioc->msix_enable && (facts->MaxMSIxVectors <=
|
|
|
|
|
MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
|
|
|
|
|
ioc->combined_reply_queue = 0;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
|
|
|
|
|
facts->MaxReplyDescriptorPostQueueDepth =
|
|
|
|
|
le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
|
|
|
|
|
facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
|
|
|
|
|
facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
|
|
|
|
|
if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
|
|
|
|
|
ioc->ir_firmware = 1;
|
2014-09-12 18:05:31 +08:00
|
|
|
|
if ((facts->IOCCapabilities &
|
2017-10-10 21:11:16 +08:00
|
|
|
|
MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
|
2014-09-12 18:05:31 +08:00
|
|
|
|
ioc->rdpq_array_capable = 1;
|
2019-05-31 20:14:35 +08:00
|
|
|
|
if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
|
|
|
|
|
&& ioc->is_aero_ioc)
|
|
|
|
|
ioc->atomic_desc_capable = 1;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
|
|
|
|
|
facts->IOCRequestFrameSize =
|
|
|
|
|
le16_to_cpu(mpi_reply.IOCRequestFrameSize);
|
2016-01-28 14:37:04 +08:00
|
|
|
|
if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
|
|
|
|
|
facts->IOCMaxChainSegmentSize =
|
|
|
|
|
le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
|
|
|
|
|
}
|
2012-11-30 10:14:21 +08:00
|
|
|
|
facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
|
|
|
|
|
facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
|
|
|
|
|
ioc->shost->max_id = -1;
|
|
|
|
|
facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
|
|
|
|
|
facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
|
|
|
|
|
facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
|
|
|
|
|
facts->HighPriorityCredit =
|
|
|
|
|
le16_to_cpu(mpi_reply.HighPriorityCredit);
|
|
|
|
|
facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
|
|
|
|
|
facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
|
2017-10-31 20:32:28 +08:00
|
|
|
|
facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Get the Page Size from IOC Facts. If it's 0, default to 4k.
|
|
|
|
|
*/
|
|
|
|
|
ioc->page_size = 1 << facts->CurrentHostPageSize;
|
|
|
|
|
if (ioc->page_size == 1) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
|
2017-10-31 20:32:28 +08:00
|
|
|
|
ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
|
|
|
|
|
}
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dinitprintk(ioc,
|
|
|
|
|
ioc_info(ioc, "CurrentHostPageSize(%d)\n",
|
|
|
|
|
facts->CurrentHostPageSize));
|
|
|
|
|
|
|
|
|
|
dinitprintk(ioc,
|
|
|
|
|
ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
|
|
|
|
|
facts->RequestCredit, facts->MaxChainDepth));
|
|
|
|
|
dinitprintk(ioc,
|
|
|
|
|
ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
|
|
|
|
|
facts->IOCRequestFrameSize * 4,
|
|
|
|
|
facts->ReplyFrameSize * 4));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* _base_send_ioc_init - send ioc_init to firmware
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
static int
|
2016-07-29 12:38:21 +08:00
|
|
|
|
_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
{
|
|
|
|
|
Mpi2IOCInitRequest_t mpi_request;
|
|
|
|
|
Mpi2IOCInitReply_t mpi_reply;
|
2014-09-12 18:05:31 +08:00
|
|
|
|
int i, r = 0;
|
2016-04-13 15:01:40 +08:00
|
|
|
|
ktime_t current_time;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
u16 ioc_status;
|
2014-09-12 18:05:31 +08:00
|
|
|
|
u32 reply_post_free_array_sz = 0;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
|
|
|
|
|
mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
|
|
|
|
|
mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
|
|
|
|
|
mpi_request.VF_ID = 0; /* TODO */
|
|
|
|
|
mpi_request.VP_ID = 0;
|
2015-11-11 20:00:22 +08:00
|
|
|
|
mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
|
2017-10-31 20:32:28 +08:00
|
|
|
|
mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
if (_base_is_controller_msix_enabled(ioc))
|
|
|
|
|
mpi_request.HostMSIxVectors = ioc->reply_queue_count;
|
|
|
|
|
mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
|
|
|
|
|
mpi_request.ReplyDescriptorPostQueueDepth =
|
|
|
|
|
cpu_to_le16(ioc->reply_post_queue_depth);
|
|
|
|
|
mpi_request.ReplyFreeQueueDepth =
|
|
|
|
|
cpu_to_le16(ioc->reply_free_queue_depth);
|
|
|
|
|
|
|
|
|
|
mpi_request.SenseBufferAddressHigh =
|
|
|
|
|
cpu_to_le32((u64)ioc->sense_dma >> 32);
|
|
|
|
|
mpi_request.SystemReplyAddressHigh =
|
|
|
|
|
cpu_to_le32((u64)ioc->reply_dma >> 32);
|
|
|
|
|
mpi_request.SystemRequestFrameBaseAddress =
|
|
|
|
|
cpu_to_le64((u64)ioc->request_dma);
|
|
|
|
|
mpi_request.ReplyFreeQueueAddress =
|
|
|
|
|
cpu_to_le64((u64)ioc->reply_free_dma);
|
|
|
|
|
|
2014-09-12 18:05:31 +08:00
|
|
|
|
if (ioc->rdpq_array_enable) {
|
|
|
|
|
reply_post_free_array_sz = ioc->reply_queue_count *
|
|
|
|
|
sizeof(Mpi2IOCInitRDPQArrayEntry);
|
2018-04-24 17:28:31 +08:00
|
|
|
|
memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
|
2014-09-12 18:05:31 +08:00
|
|
|
|
for (i = 0; i < ioc->reply_queue_count; i++)
|
2018-04-24 17:28:31 +08:00
|
|
|
|
ioc->reply_post_free_array[i].RDPQBaseAddress =
|
2014-09-12 18:05:31 +08:00
|
|
|
|
cpu_to_le64(
|
|
|
|
|
(u64)ioc->reply_post[i].reply_post_free_dma);
|
|
|
|
|
mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
|
|
|
|
|
mpi_request.ReplyDescriptorPostQueueAddress =
|
2018-04-24 17:28:31 +08:00
|
|
|
|
cpu_to_le64((u64)ioc->reply_post_free_array_dma);
|
2014-09-12 18:05:31 +08:00
|
|
|
|
} else {
|
|
|
|
|
mpi_request.ReplyDescriptorPostQueueAddress =
|
|
|
|
|
cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
|
|
|
|
|
}
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2019-12-26 19:13:27 +08:00
|
|
|
|
/*
|
|
|
|
|
* Set the flag to enable CoreDump state feature in IOC firmware.
|
|
|
|
|
*/
|
|
|
|
|
mpi_request.ConfigurationFlags |=
|
|
|
|
|
cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
/* This time stamp specifies number of milliseconds
|
|
|
|
|
* since epoch ~ midnight January 1, 1970.
|
|
|
|
|
*/
|
2016-04-13 15:01:40 +08:00
|
|
|
|
current_time = ktime_get_real();
|
|
|
|
|
mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
if (ioc->logging_level & MPT_DEBUG_INIT) {
|
|
|
|
|
__le32 *mfp;
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
mfp = (__le32 *)&mpi_request;
|
2019-12-26 19:13:30 +08:00
|
|
|
|
ioc_info(ioc, "\toffset:data\n");
|
2012-11-30 10:14:21 +08:00
|
|
|
|
for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
|
2019-12-26 19:13:30 +08:00
|
|
|
|
ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
|
2012-11-30 10:14:21 +08:00
|
|
|
|
le32_to_cpu(mfp[i]));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
r = _base_handshake_req_reply_wait(ioc,
|
|
|
|
|
sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
|
2016-07-29 12:38:21 +08:00
|
|
|
|
sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
if (r != 0) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
|
2018-04-24 17:28:31 +08:00
|
|
|
|
return r;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
|
|
|
|
|
if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
|
|
|
|
|
mpi_reply.IOCLogInfo) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: failed\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
r = -EIO;
|
|
|
|
|
}
|
|
|
|
|
|
2014-09-12 18:05:31 +08:00
|
|
|
|
return r;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_port_enable_done - command completion routine for port enable
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @smid: system request message index
|
|
|
|
|
* @msix_index: MSIX table index supplied by the OS
|
|
|
|
|
* @reply: reply message frame(lower 32bit addr)
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 1 meaning mf should be freed from _base_interrupt
|
|
|
|
|
* 0 means the mf is freed from this function.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
u8
|
|
|
|
|
mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
|
|
|
|
|
u32 reply)
|
|
|
|
|
{
|
|
|
|
|
MPI2DefaultReply_t *mpi_reply;
|
|
|
|
|
u16 ioc_status;
|
|
|
|
|
|
|
|
|
|
if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
|
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
|
|
|
|
|
if (!mpi_reply)
|
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
|
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
|
|
|
|
|
ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
|
|
|
|
|
ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
|
|
|
|
|
memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
|
|
|
|
|
ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
|
|
|
|
|
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
|
|
|
|
|
ioc->port_enable_failed = 1;
|
|
|
|
|
|
|
|
|
|
if (ioc->is_driver_loading) {
|
|
|
|
|
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
|
|
|
|
|
mpt3sas_port_enable_complete(ioc);
|
|
|
|
|
return 1;
|
|
|
|
|
} else {
|
|
|
|
|
ioc->start_scan_failed = ioc_status;
|
|
|
|
|
ioc->start_scan = 0;
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
complete(&ioc->port_enable_cmds.done);
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* _base_send_port_enable - send port_enable(discovery stuff) to firmware
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
static int
|
2016-07-29 12:38:21 +08:00
|
|
|
|
_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
{
|
|
|
|
|
Mpi2PortEnableRequest_t *mpi_request;
|
|
|
|
|
Mpi2PortEnableReply_t *mpi_reply;
|
|
|
|
|
int r = 0;
|
|
|
|
|
u16 smid;
|
|
|
|
|
u16 ioc_status;
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "sending port enable !!\n");
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: internal command already in use\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EAGAIN;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
|
|
|
|
|
if (!smid) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EAGAIN;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
|
|
|
|
|
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
|
|
|
|
|
ioc->port_enable_cmds.smid = smid;
|
|
|
|
|
memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
|
|
|
|
|
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
|
|
|
|
|
|
|
|
|
|
init_completion(&ioc->port_enable_cmds.done);
|
2019-05-31 20:14:34 +08:00
|
|
|
|
ioc->put_smid_default(ioc, smid);
|
2016-07-29 12:38:22 +08:00
|
|
|
|
wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: timeout\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
_debug_dump_mf(mpi_request,
|
|
|
|
|
sizeof(Mpi2PortEnableRequest_t)/4);
|
|
|
|
|
if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
|
|
|
|
|
r = -EFAULT;
|
|
|
|
|
else
|
|
|
|
|
r = -ETIME;
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
mpi_reply = ioc->port_enable_cmds.reply;
|
|
|
|
|
ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
|
|
|
|
|
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
|
|
|
|
|
__func__, ioc_status);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
r = -EFAULT;
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
out:
|
|
|
|
|
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return r;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
int
|
|
|
|
|
mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
|
|
|
|
|
{
|
|
|
|
|
Mpi2PortEnableRequest_t *mpi_request;
|
|
|
|
|
u16 smid;
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "sending port enable !!\n");
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: internal command already in use\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EAGAIN;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
|
|
|
|
|
if (!smid) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EAGAIN;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
|
|
|
|
|
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
|
|
|
|
|
ioc->port_enable_cmds.smid = smid;
|
|
|
|
|
memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
|
|
|
|
|
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
|
|
|
|
|
|
2019-05-31 20:14:34 +08:00
|
|
|
|
ioc->put_smid_default(ioc, smid);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* _base_determine_wait_on_discovery - desposition
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
*
|
|
|
|
|
* Decide whether to wait on discovery to complete. Used to either
|
|
|
|
|
* locate boot device, or report volumes ahead of physical devices.
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 1 for wait, 0 for don't wait.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
static int
|
|
|
|
|
_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
|
|
|
|
|
{
|
|
|
|
|
/* We wait for discovery to complete if IR firmware is loaded.
|
|
|
|
|
* The sas topology events arrive before PD events, so we need time to
|
|
|
|
|
* turn on the bit in ioc->pd_handles to indicate PD
|
|
|
|
|
* Also, it maybe required to report Volumes ahead of physical
|
|
|
|
|
* devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
|
|
|
|
|
*/
|
|
|
|
|
if (ioc->ir_firmware)
|
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
/* if no Bios, then we don't need to wait */
|
|
|
|
|
if (!ioc->bios_pg3.BiosVersion)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
/* Bios is present, then we drop down here.
|
|
|
|
|
*
|
|
|
|
|
* If there any entries in the Bios Page 2, then we wait
|
|
|
|
|
* for discovery to complete.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/* Current Boot Device */
|
|
|
|
|
if ((ioc->bios_pg2.CurrentBootDeviceForm &
|
|
|
|
|
MPI2_BIOSPAGE2_FORM_MASK) ==
|
|
|
|
|
MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
|
|
|
|
|
/* Request Boot Device */
|
|
|
|
|
(ioc->bios_pg2.ReqBootDeviceForm &
|
|
|
|
|
MPI2_BIOSPAGE2_FORM_MASK) ==
|
|
|
|
|
MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
|
|
|
|
|
/* Alternate Request Boot Device */
|
|
|
|
|
(ioc->bios_pg2.ReqAltBootDeviceForm &
|
|
|
|
|
MPI2_BIOSPAGE2_FORM_MASK) ==
|
|
|
|
|
MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* _base_unmask_events - turn on notification for this event
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @event: firmware event
|
|
|
|
|
*
|
|
|
|
|
* The mask is stored in ioc->event_masks.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
|
|
|
|
|
{
|
|
|
|
|
u32 desired_event;
|
|
|
|
|
|
|
|
|
|
if (event >= 128)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
desired_event = (1 << (event % 32));
|
|
|
|
|
|
|
|
|
|
if (event < 32)
|
|
|
|
|
ioc->event_masks[0] &= ~desired_event;
|
|
|
|
|
else if (event < 64)
|
|
|
|
|
ioc->event_masks[1] &= ~desired_event;
|
|
|
|
|
else if (event < 96)
|
|
|
|
|
ioc->event_masks[2] &= ~desired_event;
|
|
|
|
|
else if (event < 128)
|
|
|
|
|
ioc->event_masks[3] &= ~desired_event;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* _base_event_notification - send event notification
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
static int
|
2016-07-29 12:38:21 +08:00
|
|
|
|
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
{
|
|
|
|
|
Mpi2EventNotificationRequest_t *mpi_request;
|
|
|
|
|
u16 smid;
|
|
|
|
|
int r = 0;
|
|
|
|
|
int i;
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: internal command already in use\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EAGAIN;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
|
|
|
|
|
if (!smid) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EAGAIN;
|
|
|
|
|
}
|
|
|
|
|
ioc->base_cmds.status = MPT3_CMD_PENDING;
|
|
|
|
|
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
|
|
|
|
|
ioc->base_cmds.smid = smid;
|
|
|
|
|
memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
|
|
|
|
|
mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
|
|
|
|
|
mpi_request->VF_ID = 0; /* TODO */
|
|
|
|
|
mpi_request->VP_ID = 0;
|
|
|
|
|
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
|
|
|
|
|
mpi_request->EventMasks[i] =
|
|
|
|
|
cpu_to_le32(ioc->event_masks[i]);
|
|
|
|
|
init_completion(&ioc->base_cmds.done);
|
2019-05-31 20:14:34 +08:00
|
|
|
|
ioc->put_smid_default(ioc, smid);
|
2016-07-29 12:38:22 +08:00
|
|
|
|
wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: timeout\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
_debug_dump_mf(mpi_request,
|
|
|
|
|
sizeof(Mpi2EventNotificationRequest_t)/4);
|
|
|
|
|
if (ioc->base_cmds.status & MPT3_CMD_RESET)
|
|
|
|
|
r = -EFAULT;
|
|
|
|
|
else
|
|
|
|
|
r = -ETIME;
|
|
|
|
|
} else
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
|
|
|
|
|
return r;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* mpt3sas_base_validate_event_type - validating event types
|
|
|
|
|
* @ioc: per adapter object
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* @event_type: firmware event
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*
|
|
|
|
|
* This will turn on firmware event notification when application
|
|
|
|
|
* ask for that event. We don't mask events that are already enabled.
|
|
|
|
|
*/
|
|
|
|
|
void
|
|
|
|
|
mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
|
|
|
|
|
{
|
|
|
|
|
int i, j;
|
|
|
|
|
u32 event_mask, desired_event;
|
|
|
|
|
u8 send_update_to_fw;
|
|
|
|
|
|
|
|
|
|
for (i = 0, send_update_to_fw = 0; i <
|
|
|
|
|
MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
|
|
|
|
|
event_mask = ~event_type[i];
|
|
|
|
|
desired_event = 1;
|
|
|
|
|
for (j = 0; j < 32; j++) {
|
|
|
|
|
if (!(event_mask & desired_event) &&
|
|
|
|
|
(ioc->event_masks[i] & desired_event)) {
|
|
|
|
|
ioc->event_masks[i] &= ~desired_event;
|
|
|
|
|
send_update_to_fw = 1;
|
|
|
|
|
}
|
|
|
|
|
desired_event = (desired_event << 1);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!send_update_to_fw)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
mutex_lock(&ioc->base_cmds.mutex);
|
2016-07-29 12:38:21 +08:00
|
|
|
|
_base_event_notification(ioc);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
mutex_unlock(&ioc->base_cmds.mutex);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* _base_diag_reset - the "big hammer" start of day reset
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
static int
|
2016-07-29 12:38:21 +08:00
|
|
|
|
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
{
|
|
|
|
|
u32 host_diagnostic;
|
|
|
|
|
u32 ioc_state;
|
|
|
|
|
u32 count;
|
|
|
|
|
u32 hcb_size;
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "sending diag reset !!\n");
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
count = 0;
|
|
|
|
|
do {
|
|
|
|
|
/* Write magic sequence to WriteSequence register
|
|
|
|
|
* Loop until in diagnostic mode
|
|
|
|
|
*/
|
2018-09-17 23:01:09 +08:00
|
|
|
|
drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
|
|
|
|
|
writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
|
|
|
|
|
writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
|
|
|
|
|
writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
|
|
|
|
|
writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
|
|
|
|
|
writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
|
|
|
|
|
writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
|
|
|
|
|
|
|
|
|
|
/* wait 100 msec */
|
2016-07-29 12:38:21 +08:00
|
|
|
|
msleep(100);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2019-12-26 19:13:30 +08:00
|
|
|
|
if (count++ > 20) {
|
|
|
|
|
ioc_info(ioc,
|
|
|
|
|
"Stop writing magic sequence after 20 retries\n");
|
2012-11-30 10:14:21 +08:00
|
|
|
|
goto out;
|
2019-12-26 19:13:30 +08:00
|
|
|
|
}
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2018-12-07 15:28:34 +08:00
|
|
|
|
host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
|
2018-09-17 23:01:09 +08:00
|
|
|
|
drsprintk(ioc,
|
|
|
|
|
ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
|
|
|
|
|
count, host_diagnostic));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
|
|
|
|
|
|
2018-12-07 15:28:34 +08:00
|
|
|
|
hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
|
|
|
|
|
&ioc->chip->HostDiagnostic);
|
|
|
|
|
|
2013-06-29 06:21:19 +08:00
|
|
|
|
/*This delay allows the chip PCIe hardware time to finish reset tasks*/
|
2016-07-29 12:38:21 +08:00
|
|
|
|
msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2013-06-29 06:21:19 +08:00
|
|
|
|
/* Approximately 300 second max wait */
|
|
|
|
|
for (count = 0; count < (300000000 /
|
|
|
|
|
MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2018-12-07 15:28:34 +08:00
|
|
|
|
host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2019-12-26 19:13:30 +08:00
|
|
|
|
if (host_diagnostic == 0xFFFFFFFF) {
|
|
|
|
|
ioc_info(ioc,
|
|
|
|
|
"Invalid host diagnostic register value\n");
|
2012-11-30 10:14:21 +08:00
|
|
|
|
goto out;
|
2019-12-26 19:13:30 +08:00
|
|
|
|
}
|
2012-11-30 10:14:21 +08:00
|
|
|
|
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
|
|
|
|
|
break;
|
|
|
|
|
|
2016-07-29 12:38:21 +08:00
|
|
|
|
msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
drsprintk(ioc,
|
|
|
|
|
ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
|
|
|
|
|
host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
|
|
|
|
|
writel(host_diagnostic, &ioc->chip->HostDiagnostic);
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
|
|
|
|
|
&ioc->chip->HCBSize);
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
|
|
|
|
|
&ioc->chip->HostDiagnostic);
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
drsprintk(ioc,
|
|
|
|
|
ioc_info(ioc, "disable writes to the diagnostic register\n"));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
|
2016-07-29 12:38:21 +08:00
|
|
|
|
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
if (ioc_state) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
|
|
|
|
|
__func__, ioc_state);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_info(ioc, "diag reset: SUCCESS\n");
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
out:
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "diag reset: FAILED\n");
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EFAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* _base_make_ioc_ready - put controller in READY state
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
* @type: FORCE_BIG_HAMMER or SOFT_RESET
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
static int
|
2016-07-29 12:38:21 +08:00
|
|
|
|
_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
{
|
|
|
|
|
u32 ioc_state;
|
|
|
|
|
int rc;
|
|
|
|
|
int count;
|
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
if (ioc->pci_error_recovery)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dhsprintk(ioc,
|
|
|
|
|
ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
|
|
|
|
|
__func__, ioc_state));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
/* if in RESET state, it should move to READY state shortly */
|
|
|
|
|
count = 0;
|
|
|
|
|
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
|
|
|
|
|
while ((ioc_state & MPI2_IOC_STATE_MASK) !=
|
|
|
|
|
MPI2_IOC_STATE_READY) {
|
|
|
|
|
if (count++ == 10) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
|
|
|
|
|
__func__, ioc_state);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return -EFAULT;
|
|
|
|
|
}
|
2016-07-29 12:38:21 +08:00
|
|
|
|
ssleep(1);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
if (ioc_state & MPI2_DOORBELL_USED) {
|
2019-12-26 19:13:30 +08:00
|
|
|
|
ioc_info(ioc, "unexpected doorbell active!\n");
|
2012-11-30 10:14:21 +08:00
|
|
|
|
goto issue_diag_reset;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
|
2019-12-26 19:13:29 +08:00
|
|
|
|
mpt3sas_print_fault_code(ioc, ioc_state &
|
2012-11-30 10:14:21 +08:00
|
|
|
|
MPI2_DOORBELL_DATA_MASK);
|
|
|
|
|
goto issue_diag_reset;
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-26 19:13:27 +08:00
|
|
|
|
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
|
2019-12-26 19:13:28 +08:00
|
|
|
|
/*
|
|
|
|
|
* if host reset is invoked while watch dog thread is waiting
|
|
|
|
|
* for IOC state to be changed to Fault state then driver has
|
|
|
|
|
* to wait here for CoreDump state to clear otherwise reset
|
|
|
|
|
* will be issued to the FW and FW move the IOC state to
|
|
|
|
|
* reset state without copying the FW logs to coredump region.
|
|
|
|
|
*/
|
|
|
|
|
if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
|
2019-12-26 19:13:29 +08:00
|
|
|
|
mpt3sas_print_coredump_info(ioc, ioc_state &
|
2019-12-26 19:13:28 +08:00
|
|
|
|
MPI2_DOORBELL_DATA_MASK);
|
|
|
|
|
mpt3sas_base_wait_for_coredump_completion(ioc,
|
|
|
|
|
__func__);
|
|
|
|
|
}
|
2019-12-26 19:13:27 +08:00
|
|
|
|
goto issue_diag_reset;
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
if (type == FORCE_BIG_HAMMER)
|
|
|
|
|
goto issue_diag_reset;
|
|
|
|
|
|
|
|
|
|
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
|
|
|
|
|
if (!(_base_send_ioc_reset(ioc,
|
2016-07-29 12:38:21 +08:00
|
|
|
|
MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
issue_diag_reset:
|
2016-07-29 12:38:21 +08:00
|
|
|
|
rc = _base_diag_reset(ioc);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
return rc;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* _base_make_ioc_operational - put controller in OPERATIONAL state
|
|
|
|
|
* @ioc: per adapter object
|
|
|
|
|
*
|
2018-06-16 05:42:01 +08:00
|
|
|
|
* Return: 0 for success, non-zero for failure.
|
2012-11-30 10:14:21 +08:00
|
|
|
|
*/
|
|
|
|
|
static int
|
2016-07-29 12:38:21 +08:00
|
|
|
|
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
|
2012-11-30 10:14:21 +08:00
|
|
|
|
{
|
2019-08-03 21:59:54 +08:00
|
|
|
|
int r, i, index, rc;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
unsigned long flags;
|
|
|
|
|
u32 reply_address;
|
|
|
|
|
u16 smid;
|
|
|
|
|
struct _tr_list *delayed_tr, *delayed_tr_next;
|
2016-01-28 14:37:02 +08:00
|
|
|
|
struct _sc_list *delayed_sc, *delayed_sc_next;
|
|
|
|
|
struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
|
2015-11-11 20:00:28 +08:00
|
|
|
|
u8 hide_flag;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
struct adapter_reply_queue *reply_q;
|
2016-03-19 03:45:42 +08:00
|
|
|
|
Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
2018-09-17 23:01:09 +08:00
|
|
|
|
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
/* clean the delayed target reset list */
|
|
|
|
|
list_for_each_entry_safe(delayed_tr, delayed_tr_next,
|
|
|
|
|
&ioc->delayed_tr_list, list) {
|
|
|
|
|
list_del(&delayed_tr->list);
|
|
|
|
|
kfree(delayed_tr);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
list_for_each_entry_safe(delayed_tr, delayed_tr_next,
|
|
|
|
|
&ioc->delayed_tr_volume_list, list) {
|
|
|
|
|
list_del(&delayed_tr->list);
|
|
|
|
|
kfree(delayed_tr);
|
|
|
|
|
}
|
|
|
|
|
|
2016-01-28 14:37:02 +08:00
|
|
|
|
list_for_each_entry_safe(delayed_sc, delayed_sc_next,
|
|
|
|
|
&ioc->delayed_sc_list, list) {
|
|
|
|
|
list_del(&delayed_sc->list);
|
|
|
|
|
kfree(delayed_sc);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
|
|
|
|
|
&ioc->delayed_event_ack_list, list) {
|
|
|
|
|
list_del(&delayed_event_ack->list);
|
|
|
|
|
kfree(delayed_event_ack);
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-30 10:14:21 +08:00
|
|
|
|
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);

	/* hi-priority queue */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	}

	/* internal queue */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* initialize Reply Free Queue */
	for (i = 0, reply_address = (u32)ioc->reply_dma;
	    i < ioc->reply_free_queue_depth; i++, reply_address +=
	    ioc->reply_sz) {
		ioc->reply_free[i] = cpu_to_le32(reply_address);
		if (ioc->is_mcpu_endpoint)
			_base_clone_reply_to_sys_mem(ioc,
					reply_address, i);
	}

	/* initialize reply queues */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize Reply Post Free Queue */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * If RDPQ is enabled, switch to the next allocation.
		 * Otherwise advance within the contiguous region.
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
				ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		reply_q->reply_post_host_index = 0;
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:
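
	/* send IOCInit, which transitions the IOC to OPERATIONAL */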
	r = _base_send_ioc_init(ioc);
	if (r) {
		/*
		 * No need to check the IOC state for fault and issue a diag
		 * reset during host reset; this check is needed only during
		 * driver load time.
		 */
		if (!ioc->is_driver_loading)
			return r;

		rc = _base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_send_ioc_init(ioc)))
			return r;
	}

	/* initialize reply free host index */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7) <<
			    MPI2_RPHI_MSIX_INDEX_SHIFT,
			    ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
			    MPI2_RPHI_MSIX_INDEX_SHIFT,
			    &ioc->chip->ReplyPostHostIndex);

		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}

 skip_init_reply_post_host_index:

	_base_unmask_interrupts(ioc);

	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		r = _base_display_fwpkg_version(ioc);
		if (r)
			return r;
	}

	_base_static_config_pages(ioc);
	r = _base_event_notification(ioc);
	if (r)
		return r;
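
	/*
	 * During initial driver load, port enable is driven from
	 * scan_start()/scan_finished(), so return here rather than
	 * enabling the port synchronously.
	 */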
	if (ioc->is_driver_loading) {

		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
			    MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}

		ioc->wait_for_discovery_to_complete =
		    _base_determine_wait_on_discovery(ioc);

		return r; /* scan_start and scan_finished support */
	}

	r = _base_send_port_enable(ioc);

	return r;
}

/**
 * mpt3sas_base_free_resources - free controller resources
 * @ioc: per adapter object
 */
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* synchronize freeing resources with the pci_access_mutex lock */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->chip_phys && ioc->chip) {
		_base_mask_interrupts(ioc);
		ioc->shost_recovery = 1;
		_base_make_ioc_ready(ioc, SOFT_RESET);
		ioc->shost_recovery = 0;
	}

	mpt3sas_base_unmap_resources(ioc);
	mutex_unlock(&ioc->pci_access_mutex);
	return;
}

/**
 * mpt3sas_base_attach - attach controller instance
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, rc;
	int cpu_id, last_cpu_id = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* setup cpu_msix_table */
	ioc->cpu_count = num_online_cpus();
	for_each_online_cpu(cpu_id)
		last_cpu_id = cpu_id;
	ioc->cpu_msix_table_sz = last_cpu_id + 1;
	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
	ioc->reply_queue_count = 1;
	if (!ioc->cpu_msix_table) {
		ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
		r = -ENOMEM;
		goto out_free_resources;
	}

	if (ioc->is_warpdrive) {
		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
		    sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->reply_post_host_index) {
			ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
			r = -ENOMEM;
			goto out_free_resources;
		}
	}

	ioc->smp_affinity_enable = smp_affinity_enable;

	ioc->rdpq_array_enable_assigned = 0;
	ioc->dma_mask = 0;
	if (ioc->is_aero_ioc)
		ioc->base_readl = &_base_readl_aero;
	else
		ioc->base_readl = &_base_readl;

	r = mpt3sas_base_map_resources(ioc);
	if (r)
		goto out_free_resources;

	pci_set_drvdata(ioc->pdev, ioc->shost);
	r = _base_get_ioc_facts(ioc);
	if (r) {
		rc = _base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_get_ioc_facts(ioc)))
			goto out_free_resources;
	}
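
	/*
	 * Select the scatter-gather construction routines: MPI2 (SAS2.0)
	 * controllers use MPI SGEs, while MPI2.5/MPI2.6 (SAS3.0+) use
	 * IEEE SGEs.
	 */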
	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		ioc->build_sg_scmd = &_base_build_sg_scmd;
		ioc->build_sg = &_base_build_sg;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
		ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		/*
		 * In SAS3.0,
		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
		 * Target Status - all require the IEEE formatted scatter
		 * gather elements.
		 */
		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
		ioc->build_sg = &_base_build_sg_ieee;
		ioc->build_nvme_prp = &_base_build_nvme_prp;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
		if (ioc->high_iops_queues)
			ioc->get_msix_index_for_smlio =
					&_base_get_high_iops_msix_index;
		else
			ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	}
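
	/*
	 * Atomic-descriptor-capable controllers accept a single 32-bit
	 * write to post a request; others take the full 64-bit request
	 * descriptor.
	 */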
	if (ioc->atomic_desc_capable) {
		ioc->put_smid_default = &_base_put_smid_default_atomic;
		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
		ioc->put_smid_fast_path =
				&_base_put_smid_fast_path_atomic;
		ioc->put_smid_hi_priority =
				&_base_put_smid_hi_priority_atomic;
	} else {
		ioc->put_smid_default = &_base_put_smid_default;
		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
		if (ioc->is_mcpu_endpoint)
			ioc->put_smid_scsi_io =
				&_base_put_smid_mpi_ep_scsi_io;
		else
			ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
	}

	/*
	 * These function pointers are for the other requests that don't
	 * require IEEE scatter gather elements.
	 *
	 * For example Configuration Pages and SAS IOUNIT Control don't.
	 */
	ioc->build_sg_mpi = &_base_build_sg;
	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;

	r = _base_make_ioc_ready(ioc, SOFT_RESET);
	if (r)
		goto out_free_resources;

	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
	if (!ioc->pfacts) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		r = _base_get_port_facts(ioc, i);
		if (r) {
			rc = _base_check_for_fault_and_issue_reset(ioc);
			if (rc || (_base_get_port_facts(ioc, i)))
				goto out_free_resources;
		}
	}

	r = _base_allocate_memory_pools(ioc);
	if (r)
		goto out_free_resources;

	if (irqpoll_weight > 0)
		ioc->thresh_hold = irqpoll_weight;
	else
		ioc->thresh_hold = ioc->hba_queue_depth/4;

	/*
	 * Use irq_poll for reply queue processing: when the online CPU
	 * count exceeds the number of MSI-x vectors, a single vector's
	 * ISR can otherwise loop forever on a reply queue that other
	 * CPUs keep refilling, ending in a hard/soft CPU lockup.
	 * Running the handler in softirq context with a bounded budget
	 * guarantees a voluntary exit from the loop.
	 */
	_base_init_irqpolls(ioc);
	init_waitqueue_head(&ioc->reset_wq);

	/* allocate memory for the pd handle bitmask list */
	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pd_handles_sz++;
	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->pd_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}
	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->blocking_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* allocate memory for pending OS device add list */
	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pend_os_device_add_sz++;
	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
	    GFP_KERNEL);
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
	ioc->device_remove_in_progress =
		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->fwfault_debug = mpt3sas_fwfault_debug;

	/* base internal command bits */
	mutex_init(&ioc->base_cmds.mutex);
	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	/* port_enable command bits */
	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* transport internal command bits */
	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->transport_cmds.mutex);

	/* scsih internal command bits */
	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->scsih_cmds.mutex);

	/* task management internal command bits */
	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->tm_cmds.mutex);

	/* config page internal command bits */
	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->config_cmds.mutex);

	/* ctl module internal command bits */
	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->ctl_cmds.mutex);

	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		ioc->event_masks[i] = -1;

	/* here we enable the events we care about */
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
		if (ioc->is_gen35_ioc) {
			_base_unmask_events(ioc,
				MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
			_base_unmask_events(ioc,
				MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
		}
	}

	r = _base_make_ioc_operational(ioc);
	if (r)
		goto out_free_resources;

	/*
	 * Keep a copy of the current IOCFacts in prev_fw_facts; it is
	 * compared against the new facts during online firmware upgrade.
	 */
	memcpy(&ioc->prev_fw_facts, &ioc->facts,
	    sizeof(struct mpt3sas_facts));

	ioc->non_operational_loop = 0;
	ioc->ioc_coredump_loop = 0;
	ioc->got_task_abort_from_ioctl = 0;
	return 0;

 out_free_resources:

	ioc->remove_host = 1;

	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->pfacts);
	ioc->ctl_cmds.reply = NULL;
	ioc->base_cmds.reply = NULL;
	ioc->tm_cmds.reply = NULL;
	ioc->scsih_cmds.reply = NULL;
	ioc->transport_cmds.reply = NULL;
	ioc->config_cmds.reply = NULL;
	ioc->pfacts = NULL;
	return r;
}

/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	mpt3sas_free_enclosure_list(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->pfacts);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}

/**
 * _base_pre_reset_handler - pre reset handler
 * @ioc: per adapter object
 */
static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_pre_reset_handler(ioc);
	mpt3sas_ctl_pre_reset_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}

/**
 * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
 * @ioc: per adapter object
 */
static void
_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
		ioc->transport_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
		complete(&ioc->transport_cmds.done);
	}
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc->base_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
		complete(&ioc->base_cmds.done);
	}
	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc->port_enable_failed = 1;
		ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
		if (ioc->is_driver_loading) {
			ioc->start_scan_failed =
				MPI2_IOCSTATUS_INTERNAL_ERROR;
			ioc->start_scan = 0;
			ioc->port_enable_cmds.status =
				MPT3_CMD_NOT_USED;
		} else {
			complete(&ioc->port_enable_cmds.done);
		}
	}
	if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
		ioc->config_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
		ioc->config_cmds.smid = USHRT_MAX;
		complete(&ioc->config_cmds.done);
	}
}

/**
 * _base_clear_outstanding_commands - clear all outstanding commands
 * @ioc: per adapter object
 */
static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
	mpt3sas_ctl_clear_outstanding_ioctls(ioc);
	_base_clear_outstanding_mpt_commands(ioc);
}

/**
 * _base_reset_done_handler - reset done handler
 * @ioc: per adapter object
 */
static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_reset_done_handler(ioc);
	mpt3sas_ctl_reset_done_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
}

/**
 * mpt3sas_wait_for_commands_to_complete - wait for pending I/O
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * This function waits 10 seconds for all pending commands to complete
 * prior to putting the controller in reset.
 */
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;

	ioc->pending_io_count = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	ioc->pending_io_count = scsi_host_busy(ioc->shost);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}

/**
 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
 * attributes during online firmware upgrade and update the corresponding
 * IOC variables accordingly.
 *
 * @ioc: Pointer to MPT_ADAPTER structure
 */
static int
_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
{
	u16 pd_handles_sz;
	void *pd_handles = NULL, *blocking_handles = NULL;
	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
	struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;

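	/*
	 * If the upgraded firmware reports a larger MaxDevHandle, grow
	 * every per-device-handle bitmap accordingly.
	 */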
	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
		if (ioc->facts.MaxDevHandle % 8)
			pd_handles_sz++;

		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
		    GFP_KERNEL);
		if (!pd_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pd_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pd_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->pd_handles = pd_handles;

		blocking_handles = krealloc(ioc->blocking_handles,
		    pd_handles_sz, GFP_KERNEL);
		if (!blocking_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for blocking_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(blocking_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->blocking_handles = blocking_handles;
		ioc->pd_handles_sz = pd_handles_sz;

		pend_os_device_add = krealloc(ioc->pend_os_device_add,
		    pd_handles_sz, GFP_KERNEL);
		if (!pend_os_device_add) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
		    (pd_handles_sz - ioc->pend_os_device_add_sz));
		ioc->pend_os_device_add = pend_os_device_add;
		ioc->pend_os_device_add_sz = pd_handles_sz;

		device_remove_in_progress = krealloc(
		    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
		if (!device_remove_in_progress) {
			ioc_info(ioc,
			    "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(device_remove_in_progress +
		    ioc->device_remove_in_progress_sz, 0,
		    (pd_handles_sz - ioc->device_remove_in_progress_sz));
		ioc->device_remove_in_progress = device_remove_in_progress;
		ioc->device_remove_in_progress_sz = pd_handles_sz;
	}

	memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
	return 0;
}

/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
|
2012-11-30 10:14:21 +08:00
|
|
|
|
|
|
|
|
|
if (ioc->pci_error_recovery) {
|
2018-09-17 23:01:09 +08:00
|
|
|
|
ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
|
2012-11-30 10:14:21 +08:00
|
|
|
|
r = 0;
|
|
|
|
|
goto out_unlocked;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (mpt3sas_fwfault_debug)
|
|
|
|
|
mpt3sas_halt_firmware(ioc);
|
|
|
|
|
|
|
|
|
|
	/* wait for an active reset in progress to complete */
	mutex_lock(&ioc->reset_in_progress_mutex);

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
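
	/*
	 * If a trace diag buffer is registered and not yet released,
	 * note whether the IOC is in FAULT/COREDUMP so that the right
	 * master trigger can be fired once the reset completes.
	 */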
	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
		    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP)
			is_fault = 1;
	}

	_base_pre_reset_handler(ioc);
	mpt3sas_wait_for_commands_to_complete(ioc);
	_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_clear_outstanding_commands(ioc);

	/*
	 * If this hard reset is called while port enable is active,
	 * then there is no reason to call make_ioc_operational.
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}

	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	r = _base_check_ioc_facts_changes(ioc);
	if (r) {
		ioc_info(ioc,
		    "Some of the parameters got changed in this new firmware image and it requires system reboot\n");
		goto out;
	}

	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware."
		    " Please reboot the system and ensure that the correct"
		    " firmware version is running\n", ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_done_handler(ioc);

 out:
	ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
	return r;
}