2019-05-27 14:55:01 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
2015-11-18 00:50:30 +08:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2015 Linaro Ltd.
|
|
|
|
* Copyright (c) 2015 Hisilicon Limited.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef _HISI_SAS_H_
|
|
|
|
#define _HISI_SAS_H_
|
|
|
|
|
2016-02-04 02:26:08 +08:00
|
|
|
#include <linux/acpi.h>
|
2021-06-07 17:29:36 +08:00
|
|
|
#include <linux/async.h>
|
|
|
|
#include <linux/blk-mq.h>
|
|
|
|
#include <linux/blk-mq-pci.h>
|
2016-10-04 19:11:11 +08:00
|
|
|
#include <linux/clk.h>
|
2018-12-19 23:56:39 +08:00
|
|
|
#include <linux/debugfs.h>
|
2015-11-18 00:50:30 +08:00
|
|
|
#include <linux/dmapool.h>
|
2017-08-11 00:09:40 +08:00
|
|
|
#include <linux/iopoll.h>
|
scsi: hisi_sas: Pre-allocate slot DMA buffers
Currently the driver spends much time allocating and freeing the slot DMA
buffer for command delivery/completion. To boost the performance,
pre-allocate the buffers for all IPTT. The downside of this approach is
that we are reallocating all buffer memory upfront, so hog memory which we
may not need.
However, the current method - DMA buffer pool - also caches all buffers and
does not free them until the pool is destroyed, so is not exactly efficient
either.
On top of this, since the slot DMA buffer is slightly bigger than a 4K
page, we need to allocate 2x4K pages per buffer (for 4K page kernel), which
is quite wasteful. For 64K page size this is not such an issue.
So, for the 4K page case, in order to make memory usage more efficient,
pre-allocating larger blocks of DMA memory for the buffers can be more
efficient.
To make DMA memory usage most efficient, we would choose a single
contiguous DMA memory block, but this could use up all the DMA memory in
the system (when CMA enabled and no IOMMU), or we may just not be able to
allocate a DMA buffer large enough when no CMA or IOMMU.
To decide the block size we use the LCM (least common multiple) of the
buffer size and the page size. We roundup(64) to ensure the LCM is not too
large, even though a little memory may be wasted per block.
So, with this, the total memory requirement is about is about 17MB for 4096
max IPTT.
Previously (for 4K pages case), it would be 32MB (for all slots
allocated).
With this change, the relative increase of IOPS for bs=4K read when
PAGE_SIZE=4K and PAGE_SIZE=64K is as follows:
IODEPTH 4K PAGE_SIZE 64K PAGE_SIZE
32 56% 47%
64 53% 44%
128 64% 43%
256 67% 45%
Signed-off-by: Xiang Chen <chenxiang66@hisilicon.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2018-05-31 20:50:48 +08:00
|
|
|
#include <linux/lcm.h>
|
2019-02-28 22:51:01 +08:00
|
|
|
#include <linux/libata.h>
|
2015-11-18 00:50:30 +08:00
|
|
|
#include <linux/mfd/syscon.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/of_address.h>
|
2017-06-14 23:33:17 +08:00
|
|
|
#include <linux/pci.h>
|
2015-11-18 00:50:30 +08:00
|
|
|
#include <linux/platform_device.h>
|
2024-06-12 13:13:20 +08:00
|
|
|
#include <linux/pm_runtime.h>
|
2016-02-04 02:26:08 +08:00
|
|
|
#include <linux/property.h>
|
2015-11-18 00:50:30 +08:00
|
|
|
#include <linux/regmap.h>
|
2024-06-12 13:13:20 +08:00
|
|
|
#include <linux/timer.h>
|
2016-01-26 02:47:20 +08:00
|
|
|
#include <scsi/sas_ata.h>
|
2015-11-18 00:50:30 +08:00
|
|
|
#include <scsi/libsas.h>
|
|
|
|
|
2015-11-18 00:50:31 +08:00
|
|
|
/* Controller geometry and resource limits */
#define HISI_SAS_MAX_PHYS		9
#define HISI_SAS_MAX_QUEUES		32
#define HISI_SAS_QUEUE_SLOTS		4096
#define HISI_SAS_MAX_ITCT_ENTRIES	1024
#define HISI_SAS_MAX_DEVICES		HISI_SAS_MAX_ITCT_ENTRIES

/* Bit numbers for hisi_hba.flags */
#define HISI_SAS_RESET_BIT		0
#define HISI_SAS_REJECT_CMD_BIT		1
#define HISI_SAS_PM_BIT			2
#define HISI_SAS_HW_FAULT_BIT		3

#define HISI_SAS_MAX_COMMANDS		(HISI_SAS_QUEUE_SLOTS)
/*
 * A number of IPTTs is held back from the block layer; the remainder
 * (HISI_SAS_UNRESERVED_IPTT) is what normal I/O may use.
 */
#define HISI_SAS_RESERVED_IPTT		96
#define HISI_SAS_UNRESERVED_IPTT \
	(HISI_SAS_MAX_COMMANDS - HISI_SAS_RESERVED_IPTT)

/* IOST/ITCT cache snapshot dimensions (see hisi_sas_iost_itct_cache) */
#define HISI_SAS_IOST_ITCT_CACHE_NUM	64
#define HISI_SAS_IOST_ITCT_CACHE_DW_SZ	10
/* Dwords of captured data per trace-FIFO read (see hisi_sas_debugfs_fifo) */
#define HISI_SAS_FIFO_DATA_DW_SIZE	32
|
2019-08-05 21:48:01 +08:00
|
|
|
|
2017-06-29 21:02:14 +08:00
|
|
|
#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))

/*
 * Each slot owns one hisi_sas_slot_buf_table.  These helpers locate its
 * sub-buffers at a fixed offset from the slot's base pointer; the _mem
 * variants operate on the CPU address (slot->buf) and the _dma variants
 * on the device address (slot->buf_dma).
 */
#define hisi_sas_status_buf_addr(buf) \
	((buf) + offsetof(struct hisi_sas_slot_buf_table, status_buffer))
#define hisi_sas_status_buf_addr_mem(slot) hisi_sas_status_buf_addr((slot)->buf)
#define hisi_sas_status_buf_addr_dma(slot) \
	hisi_sas_status_buf_addr((slot)->buf_dma)

#define hisi_sas_cmd_hdr_addr(buf) \
	((buf) + offsetof(struct hisi_sas_slot_buf_table, command_header))
#define hisi_sas_cmd_hdr_addr_mem(slot) hisi_sas_cmd_hdr_addr((slot)->buf)
#define hisi_sas_cmd_hdr_addr_dma(slot) hisi_sas_cmd_hdr_addr((slot)->buf_dma)

#define hisi_sas_sge_addr(buf) \
	((buf) + offsetof(struct hisi_sas_slot_buf_table, sge_page))
#define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr((slot)->buf)
#define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr((slot)->buf_dma)

/* DIF SGEs live in the extended hisi_sas_slot_dif_buf_table layout */
#define hisi_sas_sge_dif_addr(buf) \
	((buf) + offsetof(struct hisi_sas_slot_dif_buf_table, sge_dif_page))
#define hisi_sas_sge_dif_addr_mem(slot) hisi_sas_sge_dif_addr((slot)->buf)
#define hisi_sas_sge_dif_addr_dma(slot) hisi_sas_sge_dif_addr((slot)->buf_dma)
|
2015-11-18 00:50:34 +08:00
|
|
|
|
2015-11-18 00:50:49 +08:00
|
|
|
/* Maximum response frame sizes per protocol */
#define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024)
#define HISI_SAS_MAX_SMP_RESP_SZ 1028
#define HISI_SAS_MAX_STP_RESP_SZ 28

/* SATA transfer protocol selection bits (hardware encoding) */
#define HISI_SAS_SATA_PROTOCOL_NONDATA		0x1
#define HISI_SAS_SATA_PROTOCOL_PIO		0x2
#define HISI_SAS_SATA_PROTOCOL_DMA		0x4
#define HISI_SAS_SATA_PROTOCOL_FPDMA		0x8
#define HISI_SAS_SATA_PROTOCOL_ATAPI		0x10

/* T10 protection-information capabilities advertised to the SCSI layer */
#define HISI_SAS_DIF_PROT_MASK (SHOST_DIF_TYPE1_PROTECTION | \
				SHOST_DIF_TYPE2_PROTECTION | \
				SHOST_DIF_TYPE3_PROTECTION)

#define HISI_SAS_DIX_PROT_MASK (SHOST_DIX_TYPE1_PROTECTION | \
				SHOST_DIX_TYPE2_PROTECTION | \
				SHOST_DIX_TYPE3_PROTECTION)

#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)

/* Timeouts, in jiffies */
#define HISI_SAS_WAIT_PHYUP_TIMEOUT	(30 * HZ)
#define HISI_SAS_CLEAR_ITCT_TIMEOUT	(20 * HZ)
|
2019-01-25 22:22:35 +08:00
|
|
|
|
2015-11-18 00:50:51 +08:00
|
|
|
struct hisi_hba;
|
2015-11-18 00:50:41 +08:00
|
|
|
|
2015-11-18 00:50:47 +08:00
|
|
|
/* Port media type bits, as reported by the hardware per PHY */
enum {
	PORT_TYPE_SAS = (1U << 1),
	PORT_TYPE_SATA = (1U << 0),
};

/* Lifecycle state of a hisi_sas_device */
enum dev_status {
	HISI_SAS_DEV_INIT,
	HISI_SAS_DEV_NORMAL,
	HISI_SAS_DEV_NCQ_ERR,
};

/* Internal abort scope: a single command vs. all commands of a device */
enum {
	HISI_SAS_INT_ABT_CMD = 0,
	HISI_SAS_INT_ABT_DEV = 1,
};

/* Device protocol type as programmed into the ITCT */
enum hisi_sas_dev_type {
	HISI_SAS_DEV_TYPE_STP = 0,
	HISI_SAS_DEV_TYPE_SSP,
	HISI_SAS_DEV_TYPE_SATA,
};
|
|
|
|
|
2017-08-11 00:09:30 +08:00
|
|
|
/*
 * Table entry describing one hardware error source: which interrupt bit
 * signals it, which status bits identify it, and a message to log.
 * @sub points to a nested table for multi-source errors, if any.
 */
struct hisi_sas_hw_error {
	u32 irq_msk;
	u32 msk;
	int shift;
	const char *msg;
	int reg;
	const struct hisi_sas_hw_error *sub;
};

/*
 * Context for a controller reset run from a workqueue; @completion is
 * signalled when the work finishes and @done records its success.
 */
struct hisi_sas_rst {
	struct hisi_hba *hisi_hba;
	struct completion *completion;
	struct work_struct work;
	bool done;
};
|
|
|
|
|
|
|
|
/*
 * Initializer for an on-stack hisi_sas_rst; expects a local `hisi_hba`
 * in scope at the expansion site.
 */
#define HISI_SAS_RST_WORK_INIT(r, c) \
	{	.hisi_hba = hisi_hba, \
		.completion = &c, \
		.work = __WORK_INITIALIZER(r.work, \
				hisi_sas_sync_rst_work_handler), \
		.done = false, \
		}

/* Declare an on-stack reset work item plus the completion it signals */
#define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \
	DECLARE_COMPLETION_ONSTACK(c); \
	struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c)

/* ECC error severity reported by the hardware */
enum hisi_sas_bit_err_type {
	HISI_SAS_ERR_SINGLE_BIT_ECC = 0x0,
	HISI_SAS_ERR_MULTI_BIT_ECC = 0x1,
};
|
|
|
|
|
2017-12-09 01:16:44 +08:00
|
|
|
/* Indexes into hisi_sas_phy.works[]; HISI_PHYES_NUM sizes the array */
enum hisi_sas_phy_event {
	HISI_PHYE_PHY_UP = 0U,
	HISI_PHYE_LINK_RESET,
	HISI_PHYE_PHY_UP_PM,
	HISI_PHYES_NUM,
};

/*
 * Per-PHY trace-FIFO configuration and captured data for debugfs.
 * Field semantics follow the hardware FIFO registers — see the
 * debugfs implementation for how each is programmed.
 */
struct hisi_sas_debugfs_fifo {
	u32 signal_sel;
	u32 dump_msk;
	u32 dump_mode;
	u32 trigger;
	u32 trigger_msk;
	u32 trigger_mode;
	u32 rd_data[HISI_SAS_FIFO_DATA_DW_SIZE];
};
|
|
|
|
|
2015-11-18 00:50:31 +08:00
|
|
|
/* Per-PHY state; one instance per physical link in hisi_hba.phy[] */
struct hisi_sas_phy {
	/* Deferred handlers, indexed by enum hisi_sas_phy_event */
	struct work_struct	works[HISI_PHYES_NUM];
	struct hisi_hba	*hisi_hba;
	struct hisi_sas_port	*port;
	struct asd_sas_phy	sas_phy;
	struct sas_identify	identify;
	struct completion *reset_completion;
	struct timer_list	timer;
	spinlock_t lock;
	u64		port_id; /* from hw */
	u64		frame_rcvd_size;
	u8		frame_rcvd[32];
	u8		phy_attached;
	u8		in_reset;
	u8		reserved[2];
	u32		phy_type;
	u32		code_violation_err_count;
	enum sas_linkrate	minimum_linkrate;
	enum sas_linkrate	maximum_linkrate;
	int enable;
	int wait_phyup_cnt;
	atomic_t down_cnt;

	/* Trace FIFO */
	struct hisi_sas_debugfs_fifo fifo;
};

/* Per-port state; groups one or more PHYs into a wide/narrow port */
struct hisi_sas_port {
	struct asd_sas_port	sas_port;
	u8	port_attached;
	u8	id; /* from hw */
};
|
|
|
|
|
2015-11-18 00:50:37 +08:00
|
|
|
/* Completion queue; @rd_point is the driver's read index */
struct hisi_sas_cq {
	struct hisi_hba *hisi_hba;
	const struct cpumask *irq_mask;
	int	rd_point;
	int	id;
	int	irq_no;
};

/* Delivery queue; @wr_point is the driver's write index, @lock guards it */
struct hisi_sas_dq {
	struct hisi_hba *hisi_hba;
	struct list_head list;
	spinlock_t lock;
	int	wr_point;
	int	id;
};
|
|
|
|
|
2015-11-18 00:50:41 +08:00
|
|
|
/* Per-device state, backing one ITCT entry (index = @device_id) */
struct hisi_sas_device {
	struct hisi_hba		*hisi_hba;
	struct domain_device	*sas_device;
	struct completion *completion;
	/* Delivery queue this device's commands are issued on */
	struct hisi_sas_dq	*dq;
	struct list_head	list;
	enum sas_device_type	dev_type;
	enum dev_status	dev_status;
	int device_id;
	int sata_idx;
	spinlock_t lock; /* For protecting slots */
};

/* Parameters for a SSP task-management function request */
struct hisi_sas_tmf_task {
	/* When @force_phy is set, issue the TMF on @phy_id only */
	int force_phy;
	int phy_id;
	u8 tmf;
	u16 tag_of_task_to_be_managed;
};
|
|
|
|
|
2015-11-18 00:50:34 +08:00
|
|
|
/* Per-command slot; one per IPTT (@idx), tracking a task in flight */
struct hisi_sas_slot {
	struct list_head entry;
	struct list_head delivery;
	struct sas_task *task;
	struct hisi_sas_port	*port;
	u64	n_elem;		/* mapped data SG entries */
	u64	n_elem_dif;	/* mapped protection-info SG entries */
	int	dlvry_queue;
	int	dlvry_queue_slot;
	int	cmplt_queue;
	int	cmplt_queue_slot;
	int	abort;
	int	ready;
	int	device_id;
	void	*cmd_hdr;
	dma_addr_t cmd_hdr_dma;
	struct timer_list internal_abort_timer;
	bool is_internal;
	struct hisi_sas_tmf_task *tmf;
	/*
	 * Do not reorder/change members after here.
	 * buf/buf_dma point at this slot's pre-allocated
	 * hisi_sas_slot_buf_table (see the addr macros above).
	 */
	void	*buf;
	dma_addr_t buf_dma;
	u16	idx;
};
|
|
|
|
|
2019-08-05 21:48:01 +08:00
|
|
|
/* One row of the IOST/ITCT cache snapshot captured for debugfs */
struct hisi_sas_iost_itct_cache {
	u32 data[HISI_SAS_IOST_ITCT_CACHE_DW_SZ];
};

/* Indexes into the debugfs register-dump array */
enum hisi_sas_debugfs_reg_array_member {
	DEBUGFS_GLOBAL = 0,
	DEBUGFS_AXI,
	DEBUGFS_RAS,
	DEBUGFS_REGS_NUM
};

/* Which on-chip cache a debugfs snapshot refers to */
enum hisi_sas_debugfs_cache_type {
	HISI_SAS_ITCT_CACHE,
	HISI_SAS_IOST_CACHE,
};
|
|
|
|
|
2024-06-12 13:13:20 +08:00
|
|
|
/* FFE (transmitter equalization) settings, one slot per link rate */
enum hisi_sas_debugfs_bist_ffe_cfg {
	FFE_SAS_1_5_GBPS,
	FFE_SAS_3_0_GBPS,
	FFE_SAS_6_0_GBPS,
	FFE_SAS_12_0_GBPS,
	FFE_RESV,
	FFE_SATA_1_5_GBPS,
	FFE_SATA_3_0_GBPS,
	FFE_SATA_6_0_GBPS,
	FFE_CFG_MAX
};

/* User-programmable fixed data patterns for BIST fixed-data mode */
enum hisi_sas_debugfs_bist_fixed_code {
	FIXED_CODE,
	FIXED_CODE_1,
	FIXED_CODE_MAX
};

/* BIST pattern-generator code modes (hardware encoding by position) */
enum {
	HISI_SAS_BIST_CODE_MODE_PRBS7,
	HISI_SAS_BIST_CODE_MODE_PRBS23,
	HISI_SAS_BIST_CODE_MODE_PRBS31,
	HISI_SAS_BIST_CODE_MODE_JTPAT,
	HISI_SAS_BIST_CODE_MODE_CJTPAT,
	HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
	HISI_SAS_BIST_CODE_MODE_TRAIN,
	HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
	HISI_SAS_BIST_CODE_MODE_HFTP,
	HISI_SAS_BIST_CODE_MODE_MFTP,
	HISI_SAS_BIST_CODE_MODE_LFTP,
	HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
};
|
|
|
|
|
2015-11-18 00:50:31 +08:00
|
|
|
/*
 * Hardware abstraction: the ops each hw generation (v1/v2/v3) provides
 * to the common driver core.  Optional ops may be NULL — callers check
 * before invoking (verify per-op against the core; not all are shown here).
 */
struct hisi_sas_hw {
	int (*hw_init)(struct hisi_hba *hisi_hba);
	/* Program the device's ITCT entry */
	void (*setup_itct)(struct hisi_hba *hisi_hba,
			   struct hisi_sas_device *device);
	int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
				struct domain_device *device);
	struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
	void (*sl_notify_ssp)(struct hisi_hba *hisi_hba, int phy_no);
	/* Ring the delivery-queue doorbell for queued commands */
	void (*start_delivery)(struct hisi_sas_dq *dq);
	/* Build the command header/table for each protocol */
	void (*prep_ssp)(struct hisi_hba *hisi_hba,
			struct hisi_sas_slot *slot);
	void (*prep_smp)(struct hisi_hba *hisi_hba,
			struct hisi_sas_slot *slot);
	void (*prep_stp)(struct hisi_hba *hisi_hba,
			struct hisi_sas_slot *slot);
	void (*prep_abort)(struct hisi_hba *hisi_hba,
			struct hisi_sas_slot *slot,
			int device_id, int abort_flag, int tag_to_abort);
	void (*phys_init)(struct hisi_hba *hisi_hba);
	void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no);
	void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
	void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
	void (*get_events)(struct hisi_hba *hisi_hba, int phy_no);
	void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
			struct sas_phy_linkrates *linkrates);
	enum sas_linkrate (*phy_get_max_linkrate)(void);
	int (*clear_itct)(struct hisi_hba *hisi_hba,
			  struct hisi_sas_device *dev);
	void (*free_device)(struct hisi_sas_device *sas_dev);
	int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
	void (*dereg_device)(struct hisi_hba *hisi_hba,
				struct domain_device *device);
	int (*soft_reset)(struct hisi_hba *hisi_hba);
	u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
	int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
				u8 reg_index, u8 reg_count, u8 *write_data);
	void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
					   int delay_ms, int timeout_ms);
	int (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba);
	/* Size of one completion-queue entry for this hw generation */
	int complete_hdr_size;
	struct scsi_host_template *sht;
};
|
|
|
|
|
|
|
|
/* Maximum number of debugfs dump snapshots retained */
#define HISI_SAS_MAX_DEBUGFS_DUMP (50)

/*
 * Snapshot containers for debugfs dumps: each pairs a live object (or
 * NULL-able pointer to one) with a copy of its data taken at dump time.
 */
struct hisi_sas_debugfs_cq {
	struct hisi_sas_cq *cq;
	void *complete_hdr;
};

struct hisi_sas_debugfs_dq {
	struct hisi_sas_dq *dq;
	struct hisi_sas_cmd_hdr *hdr;
};

struct hisi_sas_debugfs_regs {
	struct hisi_hba *hisi_hba;
	u32 *data;
};

struct hisi_sas_debugfs_port {
	struct hisi_sas_phy *phy;
	u32 *data;
};

struct hisi_sas_debugfs_iost {
	struct hisi_sas_iost *iost;
};

struct hisi_sas_debugfs_itct {
	struct hisi_sas_itct *itct;
};

struct hisi_sas_debugfs_iost_cache {
	struct hisi_sas_iost_itct_cache *cache;
};

struct hisi_sas_debugfs_itct_cache {
	struct hisi_sas_iost_itct_cache *cache;
};
|
|
|
|
|
|
|
|
/* Main per-controller state, shared by all hw generations */
struct hisi_hba {
	/* This must be the first element, used by SHOST_TO_SAS_HA */
	struct sas_ha_struct *p;

	/* Exactly one of platform_dev/pci_dev is set, per probe path */
	struct platform_device *platform_dev;
	struct pci_dev *pci_dev;
	struct device *dev;

	/* Enabled DIF/DIX protection capabilities (HISI_SAS_PROT_MASK) */
	int prot_mask;

	void __iomem *regs;
	void __iomem *sgpio_regs;
	/* Reset controller access via syscon regmap (platform devices) */
	struct regmap *ctrl;
	u32 ctrl_reset_reg;
	u32 ctrl_reset_sts_reg;
	u32 ctrl_clock_ena_reg;
	u32 refclk_frequency_mhz;
	u8 sas_addr[SAS_ADDR_SIZE];

	int n_phy;
	spinlock_t lock;
	struct semaphore sem;

	struct timer_list timer;
	struct workqueue_struct *wq;

	/* IPTT allocator state; tags tracked in @slot_index_tags bitmap */
	int slot_index_count;
	int last_slot_index;
	int last_dev_id;
	unsigned long *slot_index_tags;
	unsigned long reject_stp_links_msk;

	/* SCSI/SAS glue */
	struct sas_ha_struct sha;
	struct Scsi_Host *shost;

	struct hisi_sas_cq cq[HISI_SAS_MAX_QUEUES];
	struct hisi_sas_dq dq[HISI_SAS_MAX_QUEUES];
	struct hisi_sas_phy phy[HISI_SAS_MAX_PHYS];
	struct hisi_sas_port port[HISI_SAS_MAX_PHYS];

	int	queue_count;

	struct hisi_sas_device	devices[HISI_SAS_MAX_DEVICES];
	/* DMA-coherent hardware tables, paired CPU/DMA addresses */
	struct hisi_sas_cmd_hdr	*cmd_hdr[HISI_SAS_MAX_QUEUES];
	dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES];
	void *complete_hdr[HISI_SAS_MAX_QUEUES];
	dma_addr_t complete_hdr_dma[HISI_SAS_MAX_QUEUES];
	struct hisi_sas_initial_fis *initial_fis;
	dma_addr_t initial_fis_dma;
	struct hisi_sas_itct *itct;
	dma_addr_t itct_dma;
	struct hisi_sas_iost *iost;
	dma_addr_t iost_dma;
	struct hisi_sas_breakpoint *breakpoint;
	dma_addr_t breakpoint_dma;
	struct hisi_sas_breakpoint *sata_breakpoint;
	dma_addr_t sata_breakpoint_dma;
	struct hisi_sas_slot	*slot_info;
	unsigned long flags;	/* HISI_SAS_*_BIT */
	const struct hisi_sas_hw *hw;	/* Low level hw interface */
	unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
	struct work_struct rst_work;
	u32 phy_state;
	u32 intr_coal_ticks;	/* Time of interrupt coalesce in us */
	u32 intr_coal_count;	/* Interrupt count to coalesce */

	int cq_nvecs;
	unsigned int *reply_map;

	/* bist */
	enum sas_linkrate debugfs_bist_linkrate;
	int debugfs_bist_code_mode;
	int debugfs_bist_phy_no;
	int debugfs_bist_mode;
	u32 debugfs_bist_cnt;
	int debugfs_bist_enable;
	u32 debugfs_bist_ffe[HISI_SAS_MAX_PHYS][FFE_CFG_MAX];
	u32 debugfs_bist_fixed_code[FIXED_CODE_MAX];

	/* debugfs memories */
	/* Put Global AXI and RAS Register into register array */
	struct hisi_sas_debugfs_regs debugfs_regs[HISI_SAS_MAX_DEBUGFS_DUMP][DEBUGFS_REGS_NUM];
	struct hisi_sas_debugfs_port debugfs_port_reg[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_PHYS];
	struct hisi_sas_debugfs_cq debugfs_cq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES];
	struct hisi_sas_debugfs_dq debugfs_dq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES];
	struct hisi_sas_debugfs_iost debugfs_iost[HISI_SAS_MAX_DEBUGFS_DUMP];
	struct hisi_sas_debugfs_itct debugfs_itct[HISI_SAS_MAX_DEBUGFS_DUMP];
	struct hisi_sas_debugfs_iost_cache debugfs_iost_cache[HISI_SAS_MAX_DEBUGFS_DUMP];
	struct hisi_sas_debugfs_itct_cache debugfs_itct_cache[HISI_SAS_MAX_DEBUGFS_DUMP];

	u64 debugfs_timestamp[HISI_SAS_MAX_DEBUGFS_DUMP];
	int debugfs_dump_index;
	struct dentry *debugfs_dir;
	struct dentry *debugfs_dump_dentry;
	struct dentry *debugfs_bist_dentry;
	struct dentry *debugfs_fifo_dentry;
};
|
|
|
|
|
2015-11-18 00:50:33 +08:00
|
|
|
/* Generic HW DMA host memory structures */
|
|
|
|
/* Delivery queue header */
|
|
|
|
/* Generic HW DMA host memory structures */
/*
 * Delivery queue header: the 16-dword descriptor written to a DQ slot.
 * Bitfield meanings of dw0-dw7 differ per hw generation; see each
 * generation's prep_* implementation.
 */
struct hisi_sas_cmd_hdr {
	/* dw0 */
	__le32 dw0;

	/* dw1 */
	__le32 dw1;

	/* dw2 */
	__le32 dw2;

	/* dw3 */
	__le32 transfer_tags;

	/* dw4 */
	__le32 data_transfer_len;

	/* dw5 */
	__le32 first_burst_num;

	/* dw6 */
	__le32 sg_len;

	/* dw7 */
	__le32 dw7;

	/* dw8-9 */
	__le64 cmd_table_addr;

	/* dw10-11 */
	__le64 sts_buffer_addr;

	/* dw12-13 */
	__le64 prd_table_addr;

	/* dw14-15 */
	__le64 dif_prd_table_addr;
};
|
|
|
|
|
|
|
|
/* ITCT (device context) entry as laid out in DMA memory */
struct hisi_sas_itct {
	__le64 qw0;
	__le64 sas_addr;
	__le64 qw2;
	__le64 qw3;
	__le64 qw4_15[12];
};

/* IOST (I/O context) entry as laid out in DMA memory */
struct hisi_sas_iost {
	__le64 qw0;
	__le64 qw1;
	__le64 qw2;
	__le64 qw3;
};

/* Hardware error record; dword meanings are generation-specific */
struct hisi_sas_err_record {
	u32 data[4];
};

/* Buffer the hardware fills with the initial D2H FIS on SATA link-up */
struct hisi_sas_initial_fis {
	struct hisi_sas_err_record err_record;
	struct dev_to_host_fis fis;
	u32 rsvd[3];
};
|
|
|
|
|
|
|
|
/* Opaque per-I/O breakpoint context maintained by the hardware */
struct hisi_sas_breakpoint {
	u8	data[128];
};

/* SATA breakpoint context: one entry per NCQ tag */
struct hisi_sas_sata_breakpoint {
	struct hisi_sas_breakpoint tag[32];
};

/* One scatter-gather element in a PRD table */
struct hisi_sas_sge {
	__le64 addr;
	__le32 page_ctrl_0;
	__le32 page_ctrl_1;
	__le32 data_len;
	__le32 data_off;
};
|
|
|
|
|
|
|
|
/* Raw SMP request frame as placed in the command table */
struct hisi_sas_command_table_smp {
	u8 bytes[44];
};

/* STP/SATA command: H2D FIS, padding, then ATAPI CDB */
struct hisi_sas_command_table_stp {
	struct	host_to_dev_fis command_fis;
	u8	dummy[12];
	u8	atapi_cdb[ATAPI_CDB_LEN];
};
|
|
|
|
|
2019-05-29 17:58:44 +08:00
|
|
|
#define HISI_SAS_SGE_PAGE_CNT (124)
|
2015-11-18 00:50:33 +08:00
|
|
|
struct hisi_sas_sge_page {
|
|
|
|
struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT];
|
2017-06-29 21:02:14 +08:00
|
|
|
} __aligned(16);
|
2015-11-18 00:50:33 +08:00
|
|
|
|
2019-05-29 17:58:44 +08:00
|
|
|
/* DIF (protection information) SGE count mirrors the data SGE count. */
#define HISI_SAS_SGE_DIF_PAGE_CNT	HISI_SAS_SGE_PAGE_CNT

/* Per-slot DIF scatter-gather table; 16-byte aligned for DMA. */
struct hisi_sas_sge_dif_page {
	struct hisi_sas_sge sge[HISI_SAS_SGE_DIF_PAGE_CNT];
} __aligned(16);
|
|
|
|
|
2015-11-18 00:50:33 +08:00
|
|
|
/*
 * SSP command frame: common SSP header, then one IU chosen by frame
 * type.  The anonymous struct carries a COMMAND IU plus trailing
 * protection words for DIF-enabled commands.
 */
struct hisi_sas_command_table_ssp {
	struct ssp_frame_hdr hdr;
	union {
		struct {
			struct ssp_command_iu task;
			u32 prot[7];	/* protection info appended to the IU */
		};
		struct ssp_tmf_iu ssp_task;	/* task management frame */
		struct xfer_rdy_iu xfer_rdy;	/* XFER_RDY frame */
		struct ssp_response_iu ssp_res;	/* RESPONSE frame */
	} u;
};
|
|
|
|
|
|
|
|
/*
 * One command slot's command area: overlay of the three protocol
 * frame formats (SSP/SMP/STP); 16-byte aligned for DMA.
 */
union hisi_sas_command_table {
	struct hisi_sas_command_table_ssp ssp;
	struct hisi_sas_command_table_smp smp;
	struct hisi_sas_command_table_stp stp;
} __aligned(16);
|
|
|
|
|
|
|
|
/*
 * Completion status area for a slot: an error record followed by the
 * returned information unit (up to 1024 bytes); 16-byte aligned.
 */
struct hisi_sas_status_buffer {
	struct hisi_sas_err_record err;
	u8 iu[1024];		/* response IU as returned by the device */
} __aligned(16);
|
|
|
|
|
|
|
|
/*
 * Combined per-slot DMA buffer: status area, command frame and the
 * data SG table, allocated as one contiguous region (see the
 * pre-allocation note in the file header).
 */
struct hisi_sas_slot_buf_table {
	struct hisi_sas_status_buffer status_buffer;
	union hisi_sas_command_table command_header;
	struct hisi_sas_sge_page sge_page;
};
|
2017-03-23 01:25:17 +08:00
|
|
|
|
2019-02-06 18:52:51 +08:00
|
|
|
/*
 * Per-slot DMA buffer layout when DIF is in use: the standard slot
 * buffer followed by the DIF SG table.
 */
struct hisi_sas_slot_dif_buf_table {
	struct hisi_sas_slot_buf_table slot_buf;
	struct hisi_sas_sge_dif_page sge_dif_page;
};
|
|
|
|
|
2017-06-14 23:33:20 +08:00
|
|
|
/*
 * Core-driver API: symbols exported by the common hisi_sas_main code
 * for use by the hw-generation-specific (v1/v2/v3) modules.
 */

/* SAS transport template shared by all HBA instances. */
extern struct scsi_transport_template *hisi_sas_stt;

/* debugfs controls: master enable, number of dumps to keep, root dir. */
extern bool hisi_sas_debugfs_enable;
extern u32 hisi_sas_debugfs_dump_count;
extern struct dentry *hisi_sas_debugfs_dir;

/* Phy control. */
extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
extern void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no,
				int enable);
extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
			      gfp_t gfp_flags);
extern void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no);
extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
				      enum hisi_sas_phy_event event);

/* HBA resource setup/teardown. */
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
extern int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba);

/* Protocol helpers. */
extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
				    int direction);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern void hisi_sas_sata_done(struct sas_task *task,
			       struct hisi_sas_slot *slot);
extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);

/* Platform-device entry points for the platform-based hw drivers. */
extern int hisi_sas_probe(struct platform_device *pdev,
			  const struct hisi_sas_hw *ops);
extern int hisi_sas_remove(struct platform_device *pdev);

/* SCSI midlayer callbacks implemented in the core. */
extern int hisi_sas_slave_configure(struct scsi_device *sdev);
extern int hisi_sas_slave_alloc(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);

/* Slot/task lifecycle. */
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
				    struct sas_task *task,
				    struct hisi_sas_slot *slot,
				    bool need_lock);
extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);

/* Reset handling and interrupt synchronization. */
extern void hisi_sas_rst_work_handler(struct work_struct *work);
extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
extern void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba);
extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba);
extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba);
|
2015-11-18 00:50:30 +08:00
|
|
|
#endif
|