sssnic: support this new driver

All files are extracted from 3snic-eth-3s9xx-driver-sssnic-1.0.6.4-1-src.tar.gz.

Here is some key information:
1) Please do not remove those #ifdef blocks kept for compatibility,
because removing them could hinder your steps.
2) Replace four files as written in the scripts/release.sh file:
cp -p -f $MK_DIR/replace/makefile_hw $HW_DIR/Makefile
cp -p -f $MK_DIR/replace/makefile_nic $NIC_DIR/Makefile
cp -p -f $MK_DIR/replace/sss_linux_kernel.h $CUR_DIR/include/kernel/sss_linux_kernel.h
cp -p -f $MK_DIR/replace/sss_hwdev_link.c $CUR_DIR/hw/sss_hwdev_link.c
The reason here is very simple: compatibility.
3) Add vlan config dependency in Kconfig and do not add more specific configs into
some config files.
4) get rid of "Werror" in Makefile.
5) If someone is willing to update to a new version, please keep the Makefile and config
untouched, which I rewrote for compatibility.

Signed-off-by: Jason Xing <kernelxing@tencent.com>
Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
This commit is contained in:
Jason Xing 2023-10-09 15:29:49 +08:00 committed by Jianping Liu
parent bf0d1166c8
commit 2ae9cf86d2
206 changed files with 43886 additions and 0 deletions

View File

@ -0,0 +1,23 @@
# SPDX-License-Identifier: GPL-2.0
#
# 3SNIC network device configuration
#
config NET_VENDOR_3SNIC
bool "3SNIC smart NIC devices"
default y
depends on PCI
depends on VLAN_8021Q || VLAN_8021Q_MODULE
help
If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about 3SNIC cards. If you say Y, you will be
asked for your specific card in the following questions.
# Only present the per-device driver options when the vendor gate is enabled.
if NET_VENDOR_3SNIC
source "drivers/net/ethernet/3snic/sssnic/Kconfig"
endif # NET_VENDOR_3SNIC

View File

@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the 3SNIC network device drivers.
#
# Descend into the sssnic/ subtree only when CONFIG_SSSNIC is set (m or y).
obj-$(CONFIG_SSSNIC) += sssnic/

View File

@ -0,0 +1,27 @@
# SPDX-License-Identifier: GPL-2.0
#
# 3SNIC network device configuration
#
# Ethernet service driver; the shared hardware layer (SSSNIC_HW) is pulled
# in automatically via 'select' below.
config SSSNIC
tristate "3SNIC Ethernet Controller SSSNIC Support"
depends on PCI
depends on ARM64 || X86_64
depends on VLAN_8021Q || VLAN_8021Q_MODULE
select SSSNIC_HW
default m
help
This driver supports 3SNIC Ethernet Controller SSSNIC device.
For more information about this product, go to the product
description with smart NIC:
<http://www.3snic.com>
To compile this driver as a module, choose M here. The module
will be called sssnic.
# Hidden helper symbol: the common hardware layer shared by the SSSNIC
# service drivers. It has no prompt; it is enabled via 'select' from
# SSSNIC above.
#
# Note: Kconfig dependencies reference symbol names WITHOUT the CONFIG_
# prefix. The previous "depends on CONFIG_VLAN_8021Q || ..." referred to
# undefined symbols, so the dependency silently evaluated to n and was
# only overridden by the 'select', triggering "unmet direct dependencies"
# warnings.
config SSSNIC_HW
	tristate
	depends on PCI
	depends on VLAN_8021Q || VLAN_8021Q_MODULE
	default n

View File

@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the 3SNIC network device drivers.
#
# hw/ builds the common hardware-layer module (sssdk); nic/ builds the
# Ethernet service module (sssnic).
# NOTE(review): hw/ is listed first, presumably because nic/ uses symbols
# exported by hw/ -- confirm against the module dependencies.
obj-$(CONFIG_SSSNIC_HW) += hw/
obj-$(CONFIG_SSSNIC) += nic/

View File

@ -0,0 +1,62 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2023 3SNIC
#
# NOTE(review): EXPORT_SYMBOL is not a kbuild built-in; presumably consumed
# by kcompat.mk included below -- confirm.
EXPORT_SYMBOL := true
# Build timestamp embedded into the module via __TIME_STR__.
# NOTE(review): this makes builds non-reproducible by design -- confirm it
# is required for support/diagnostics.
SYS_TIME=$(shell date +%Y-%m-%d_%H:%M:%S)
ccflags-y += -D __TIME_STR__=\"$(SYS_TIME)\"
# Header search paths for the hw/ (sssdk) module.
ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/hw
ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include
ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/hw
ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/kernel
ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/hw/include
ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/kcompat
ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/hw/tool
# -Werror intentionally left disabled (see release notes: the driver must
# build across many kernel versions/compilers).
#ccflags-y += -Werror
ccflags-y += -Wno-implicit-fallthrough
# Kernel-compatibility feature detection (sets additional ccflags).
include $(srctree)/drivers/net/ethernet/3snic/sssnic/kcompat/mk/kcompat.mk
obj-$(CONFIG_SSSNIC_HW) += sssdk.o
# Objects composing the sssdk (common hardware layer) module.
sssdk-y := sss_hw_main.o \
sss_pci_probe.o \
sss_pci_remove.o \
sss_pci_shutdown.o \
sss_pci_error.o \
sss_pci_sriov.o \
sss_pci_global.o \
sss_hwdev_api.o \
sss_hwdev_cap.o \
sss_hwdev_export.o \
sss_hwdev_link.o \
sss_hwdev_init.o \
sss_hwdev_mgmt_info.o \
sss_hwdev_mgmt_channel.o \
sss_hwdev_io_flush.o \
sss_hwif_ctrlq.o \
sss_hwif_ctrlq_init.o \
sss_hwif_ctrlq_export.o \
sss_hwif_mbx.o \
sss_hwif_mbx_init.o \
sss_hwif_mbx_export.o \
sss_hwif_adm.o \
sss_hwif_adm_init.o \
sss_hwif_init.o \
sss_hwif_api.o \
sss_hwif_export.o \
sss_hwif_eq.o \
sss_hwif_mgmt_init.o \
sss_hwif_irq.o \
sss_hwif_aeq.o \
sss_common.o \
sss_wq.o \
sss_hwif_ceq.o \
sss_adapter_mgmt.o \
../kcompat/sss_kcompat.o \
./tool/sss_tool_chip.o \
./tool/sss_tool_main.o \
./tool/sss_tool_sdk.o \
./tool/sss_tool_sm.o

View File

@ -0,0 +1,78 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_ADAPTER_H
#define SSS_ADAPTER_H
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include "sss_hw_common.h"
#include "sss_hw_uld_driver.h"
#include "sss_hw_svc_cap.h"
#include "sss_sriov_info.h"
/* Max PCI functions tracked per card (sizes func_handle_array below). */
#define SSS_MAX_FUNC 4096
/* Per-card (chip) node: groups all PCI functions belonging to one NIC. */
struct sss_card_node {
struct list_head node;
struct list_head func_list;
char chip_name[IFNAMSIZ];
u8 bus_id;
u8 resvd[7];
u16 func_num;
atomic_t channel_timeout_cnt;
/* Indexed by global function id; entries set when a function attaches. */
void *func_handle_array[SSS_MAX_FUNC];
void *dbgtool_info;
};
/* Structure pcidev private */
struct sss_pci_adapter {
struct pci_dev *pcidev;
void *hwdev;
struct sss_hal_dev hal_dev;
/* Record the upper driver object address,
 * such as nic_dev and toe_dev, fc_dev
 */
void *uld_dev[SSS_SERVICE_TYPE_MAX];
/* Record the upper driver object name */
char uld_dev_name[SSS_SERVICE_TYPE_MAX][IFNAMSIZ];
/* Manage all function device linked by list */
struct list_head node;
void __iomem *cfg_reg_bar;
void __iomem *intr_reg_bar;
void __iomem *mgmt_reg_bar;
void __iomem *db_reg_bar;
u64 db_dwqe_len;
u64 db_base_paddr;
struct sss_card_node *chip_node;
int init_state;
struct sss_sriov_info sriov_info;
atomic_t ref_cnt;
atomic_t uld_ref_cnt[SSS_SERVICE_TYPE_MAX];
spinlock_t uld_lock; /* protect uld probe and remove */
/* set when uld driver processing event */
unsigned long uld_run_state;
unsigned long uld_attach_state;
/* lock for attach/detach uld */
struct mutex uld_attach_mutex;
/* NOTE(review): 'dettach' is a typo for 'detach'; renaming would touch
 * every user of this field, so it is left as-is here.
 */
spinlock_t dettach_uld_lock; /* spin lock for uld_attach_state access */
};
#endif

View File

@ -0,0 +1,121 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_ADM_INFO_H
#define SSS_ADM_INFO_H
#include <linux/types.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include "sss_hw_common.h"
/* ADM (administration) message opcodes exchanged with the management CPU. */
enum sss_adm_msg_type {
/* write to mgmt cpu command with completion */
SSS_ADM_MSG_WRITE_TO_MGMT_MODULE = 2,
/* multi read command with completion notification */
SSS_ADM_MSG_MULTI_READ = 3,
/* write command without completion notification */
SSS_ADM_MSG_POLL_WRITE = 4,
/* read command without completion notification */
SSS_ADM_MSG_POLL_READ = 5,
/* NOTE(review): the name indicates an async write, but the inherited
 * comment said "read from mgmt cpu command with completion" -- confirm.
 */
SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE = 6,
SSS_ADM_MSG_MAX,
};
/* Write-back state area the hardware updates for an ADM message queue. */
struct sss_adm_msg_state {
u64 head;
u32 desc_buf;
u32 elem_hi;
u32 elem_lo;
u32 rsvd0;
u64 rsvd1;
};
/* HW struct */
struct sss_adm_msg_elem {
u64 control;
u64 next_elem_paddr;
u64 desc;
/* HW struct */
union {
struct {
u64 hw_msg_paddr;
} write;
struct {
u64 hw_wb_reply_paddr;
u64 hw_msg_paddr;
} read;
};
};
/* Reply written back by hardware for a completed ADM message. */
struct sss_adm_msg_reply_fmt {
u64 head;
u64 reply;
};
/* Per-element driver-side context (virtual addresses + completion). */
struct sss_adm_msg_elem_ctx {
struct sss_adm_msg_elem *elem_vaddr;
void *adm_msg_vaddr;
struct sss_adm_msg_reply_fmt *reply_fmt;
struct completion done;
int state;
u32 store_pi;
void *hwdev;
};
/* One ADM message queue: a ring of elements plus its DMA backing buffers. */
struct sss_adm_msg {
void *hwdev;
enum sss_adm_msg_type msg_type;
u32 elem_num;
u16 elem_size;
u16 reply_size;
u32 pi;
u32 ci;
struct semaphore sem;
spinlock_t async_lock; /* protect adm msg async and sync */
dma_addr_t wb_state_paddr;
dma_addr_t head_elem_paddr;
struct sss_adm_msg_state *wb_state;
struct sss_adm_msg_elem *head_node;
struct sss_adm_msg_elem_ctx *elem_ctx;
struct sss_adm_msg_elem *now_node;
struct sss_dma_addr_align elem_addr;
/* Contiguous backing areas; *_paddr_base are the DMA addresses and
 * *_size_align the per-element aligned sizes.
 */
u8 *elem_vaddr_base;
u8 *reply_vaddr_base;
u8 *buf_vaddr_base;
u64 elem_paddr_base;
u64 reply_paddr_base;
u64 buf_paddr_base;
u64 elem_size_align;
u64 reply_size_align;
u64 buf_size_align;
};
#endif

View File

@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_AEQ_INFO_H
#define SSS_AEQ_INFO_H
#include <linux/types.h>
#include <linux/workqueue.h>
#include "sss_eq_info.h"
#include "sss_hw_aeq.h"
#define SSS_MAX_AEQ 4
/* Handler for hardware-reported AEQ events; data/size are the event payload. */
typedef void (*sss_aeq_hw_event_handler_t)(void *pri_handle, u8 *data, u8 size);
/* Handler for software AEQ events; returns a status byte to the caller. */
typedef u8 (*sss_aeq_sw_event_handler_t)(void *pri_handle, u8 event, u8 *data);
/* Async event queue bookkeeping: registered callbacks plus the EQs themselves. */
struct sss_aeq_info {
void *hwdev;
sss_aeq_hw_event_handler_t hw_event_handler[SSS_AEQ_EVENT_MAX];
void *hw_event_data[SSS_AEQ_EVENT_MAX];
sss_aeq_sw_event_handler_t sw_event_handler[SSS_AEQ_SW_EVENT_MAX];
void *sw_event_data[SSS_AEQ_SW_EVENT_MAX];
/* Bitmaps marking which handler slots are registered/active. */
unsigned long hw_event_handler_state[SSS_AEQ_EVENT_MAX];
unsigned long sw_event_handler_state[SSS_AEQ_SW_EVENT_MAX];
struct sss_eq aeq[SSS_MAX_AEQ];
u16 num;
u16 rsvd1;
u32 rsvd2;
struct workqueue_struct *workq;
};
#endif

View File

@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_BOARD_INFO_H
#define SSS_BOARD_INFO_H
/* Board type identifiers.
 * NOTE(review): values presumably must match the firmware/MPU definitions --
 * do not renumber; confirm against the firmware headers.
 */
enum sss_board_type_define {
SSS_BOARD_TYPE_MPU_DEFAULT = 0, /* Default config */
SSS_BOARD_TYPE_TEST_EVB_4X25G = 1, /* EVB Board */
SSS_BOARD_TYPE_TEST_CEM_2X100G = 2, /* 2X100G CEM Card */
SSS_BOARD_TYPE_STRG_SMARTIO_4X32G_FC = 30, /* 4X32G SmartIO FC Card */
SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_TIOE = 31, /* 4X25GE SmartIO TIOE Card */
SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE = 32, /* 4X25GE SmartIO ROCE Card */
SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE_AA = 33, /* 4X25GE SmartIO ROCE_AA Card */
SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV = 34, /* 4X25GE SmartIO container Card */
SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV_SW = 35, /* 4X25GE SmartIO container switch Card */
SSS_BOARD_TYPE_STRG_2X100G_TIOE = 40, /* 2X100G SmartIO TIOE Card */
SSS_BOARD_TYPE_STRG_2X100G_ROCE = 41, /* 2X100G SmartIO ROCE Card */
SSS_BOARD_TYPE_STRG_2X100G_ROCE_AA = 42, /* 2X100G SmartIO ROCE_AA Card */
SSS_BOARD_TYPE_CAL_2X25G_NIC_75MPPS = 100, /* 2X25G ETH Standard card 75MPPS */
SSS_BOARD_TYPE_CAL_2X25G_NIC_40MPPS = 101, /* 2X25G ETH Standard card 40MPPS */
SSS_BOARD_TYPE_CAL_4X25G_NIC_120MPPS = 105, /* 4X25G ETH Standard card 120MPPS */
SSS_BOARD_TYPE_CAL_2X32G_FC_HBA = 110, /* 2X32G FC HBA card */
SSS_BOARD_TYPE_CAL_2X16G_FC_HBA = 111, /* 2X16G FC HBA card */
SSS_BOARD_TYPE_CAL_2X100G_NIC_120MPPS = 115, /* 2X100G ETH Standard card 120MPPS */
SSS_BOARD_TYPE_CLD_2X100G_SDI5_1 = 170, /* 2X100G SDI 5.1 Card */
SSS_BOARD_TYPE_CLD_2X25G_SDI5_0_LITE = 171, /* 2x25G SDI5.0 Lite Card */
SSS_BOARD_TYPE_CLD_2X100G_SDI5_0 = 172, /* 2x100G SDI5.0 Card */
SSS_BOARD_MAX_TYPE = 0xFF
};
#endif

View File

@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_CEQ_INFO_H
#define SSS_CEQ_INFO_H
#include <linux/types.h>
#include "sss_hw_ceq.h"
#include "sss_eq_info.h"
#define SSS_MAX_CEQ 32
/* Completion event callback; data is the raw CEQE value from the queue. */
typedef void (*sss_ceq_event_handler_t)(void *dev, u32 data);
/* Completion event queue bookkeeping: per-event callbacks plus the EQs. */
struct sss_ceq_info {
void *hwdev;
sss_ceq_event_handler_t event_handler[SSS_CEQ_EVENT_MAX];
void *event_handler_data[SSS_CEQ_EVENT_MAX];
void *ceq_data[SSS_CEQ_EVENT_MAX];
/* Bitmaps marking which handler slots are registered/active. */
unsigned long event_handler_state[SSS_CEQ_EVENT_MAX];
struct sss_eq ceq[SSS_MAX_CEQ];
u16 num;
u16 rsvd1;
u32 rsvd2;
};
#endif

View File

@ -0,0 +1,171 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_CSR_H
#define SSS_CSR_H
/* Address-window flag bits prepended to register offsets.
 * NOTE(review): 0x40000000 appears to select the config-register window and
 * 0xC0000000 the management window (masked off by SSS_CSR_FLAG_MASK) --
 * inferred from the names; confirm against the chip documentation.
 */
#define SSS_CSR_CFG_FLAG 0x40000000
#define SSS_MGMT_FLAG 0xC0000000
#define SSS_CSR_FLAG_MASK 0x3FFFFFFF
#define SSS_VF_CFG_REG_OFFSET 0x2000
#define SSS_HOST_CSR_BASE_ADDR (SSS_MGMT_FLAG + 0x6000)
#define SSS_CSR_GLOBAL_BASE_ADDR (SSS_MGMT_FLAG + 0x6400)
/* HW interface registers */
#define SSS_CSR_HW_ATTR0_ADDR (SSS_CSR_CFG_FLAG + 0x0)
#define SSS_CSR_HW_ATTR1_ADDR (SSS_CSR_CFG_FLAG + 0x4)
#define SSS_CSR_HW_ATTR2_ADDR (SSS_CSR_CFG_FLAG + 0x8)
#define SSS_CSR_HW_ATTR3_ADDR (SSS_CSR_CFG_FLAG + 0xC)
#define SSS_CSR_HW_ATTR4_ADDR (SSS_CSR_CFG_FLAG + 0x10)
#define SSS_CSR_HW_ATTR5_ADDR (SSS_CSR_CFG_FLAG + 0x14)
#define SSS_CSR_HW_ATTR6_ADDR (SSS_CSR_CFG_FLAG + 0x18)
/* Mailbox registers */
#define SSS_HW_CSR_MBX_DATA_OFF 0x80
#define SSS_HW_CSR_MBX_CTRL_OFF (SSS_CSR_CFG_FLAG + 0x0100)
#define SSS_HW_CSR_MBX_INT_OFFSET_OFF (SSS_CSR_CFG_FLAG + 0x0104)
#define SSS_HW_CSR_MBX_RES_H_OFF (SSS_CSR_CFG_FLAG + 0x0108)
#define SSS_HW_CSR_MBX_RES_L_OFF (SSS_CSR_CFG_FLAG + 0x010C)
/* PPF/MPF election registers */
#define SSS_PPF_ELECT_OFF 0x0
#define SSS_MPF_ELECT_OFF 0x20
#define SSS_CSR_PPF_ELECT_ADDR \
(SSS_HOST_CSR_BASE_ADDR + SSS_PPF_ELECT_OFF)
#define SSS_CSR_GLOBAL_MPF_ELECT_ADDR \
(SSS_HOST_CSR_BASE_ADDR + SSS_MPF_ELECT_OFF)
#define SSS_CSR_HW_PPF_ELECT_BASE_ADDR (SSS_CSR_CFG_FLAG + 0x60)
#define SSS_CSR_HW_PPF_ELECT_PORT_STRIDE 0x4
#define SSS_CSR_FUNC_PPF_ELECT(host_id) \
(SSS_CSR_HW_PPF_ELECT_BASE_ADDR + \
(host_id) * SSS_CSR_HW_PPF_ELECT_PORT_STRIDE)
#define SSS_CSR_DMA_ATTR_TBL_ADDR (SSS_CSR_CFG_FLAG + 0x380)
#define SSS_CSR_DMA_ATTR_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x390)
/* CLP registers */
#define SSS_BAR3_CLP_BASE_ADDR (SSS_MGMT_FLAG + 0x0000)
#define SSS_UCPU_CLP_SIZE_REG (SSS_HOST_CSR_BASE_ADDR + 0x40)
#define SSS_UCPU_CLP_REQBASE_REG (SSS_HOST_CSR_BASE_ADDR + 0x44)
#define SSS_UCPU_CLP_RSPBASE_REG (SSS_HOST_CSR_BASE_ADDR + 0x48)
#define SSS_UCPU_CLP_REQ_REG (SSS_HOST_CSR_BASE_ADDR + 0x4c)
#define SSS_UCPU_CLP_RSP_REG (SSS_HOST_CSR_BASE_ADDR + 0x50)
#define SSS_CLP_REG(member) (SSS_UCPU_CLP_##member##_REG)
#define SSS_CLP_REQ_DATA SSS_BAR3_CLP_BASE_ADDR
#define SSS_CLP_RSP_DATA (SSS_BAR3_CLP_BASE_ADDR + 0x1000)
#define SSS_CLP_DATA(member) (SSS_CLP_##member##_DATA)
/* MSI-X registers */
#define SSS_CSR_MSIX_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x310)
#define SSS_CSR_MSIX_CTRL_ADDR (SSS_CSR_CFG_FLAG + 0x300)
#define SSS_CSR_MSIX_CNT_ADDR (SSS_CSR_CFG_FLAG + 0x304)
#define SSS_CSR_FUNC_MSI_CLR_WR_ADDR (SSS_CSR_CFG_FLAG + 0x58)
/* Bit layout of the MSI clear/indirection register. */
#define SSS_MSI_CLR_INDIR_RESEND_TIMER_CLR_SHIFT 0
#define SSS_MSI_CLR_INDIR_INT_MSK_SET_SHIFT 1
#define SSS_MSI_CLR_INDIR_INT_MSK_CLR_SHIFT 2
#define SSS_MSI_CLR_INDIR_AUTO_MSK_SET_SHIFT 3
#define SSS_MSI_CLR_INDIR_AUTO_MSK_CLR_SHIFT 4
#define SSS_MSI_CLR_INDIR_SIMPLE_INDIR_ID_SHIFT 22
#define SSS_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK 0x1U
#define SSS_MSI_CLR_INDIR_INT_MSK_SET_MASK 0x1U
#define SSS_MSI_CLR_INDIR_INT_MSK_CLR_MASK 0x1U
#define SSS_MSI_CLR_INDIR_AUTO_MSK_SET_MASK 0x1U
#define SSS_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK 0x1U
#define SSS_MSI_CLR_INDIR_SIMPLE_INDIR_ID_MASK 0x3FFU
#define SSS_SET_MSI_CLR_INDIR(val, member) \
(((val) & SSS_MSI_CLR_INDIR_##member##_MASK) << \
SSS_MSI_CLR_INDIR_##member##_SHIFT)
/* EQ registers */
#define SSS_AEQ_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x210)
#define SSS_CEQ_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x290)
/* Select the AEQ or CEQ indirect-index register for the given EQ type.
 * The argument is parenthesized so an expression argument keeps the
 * intended precedence (CERT PRE01-C); the original expanded bare 'type'
 * into 'type == SSS_AEQ'.
 */
#define SSS_EQ_INDIR_ID_ADDR(type) \
(((type) == SSS_AEQ) ? SSS_AEQ_INDIR_ID_ADDR : SSS_CEQ_INDIR_ID_ADDR)
/* Per-page EQ mapping-table (MTT) registers: one hi/lo address pair per page. */
#define SSS_AEQ_MTT_OFF_BASE_ADDR (SSS_CSR_CFG_FLAG + 0x240)
#define SSS_CEQ_MTT_OFF_BASE_ADDR (SSS_CSR_CFG_FLAG + 0x2C0)
#define SSS_CSR_EQ_PAGE_OFF_STRIDE 8
#define SSS_AEQ_PHY_HI_ADDR_REG(pg_num) \
(SSS_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE)
#define SSS_AEQ_PHY_LO_ADDR_REG(pg_num) \
(SSS_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE + 4)
#define SSS_CEQ_PHY_HI_ADDR_REG(pg_num) \
(SSS_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE)
#define SSS_CEQ_PHY_LO_ADDR_REG(pg_num) \
(SSS_CEQ_MTT_OFF_BASE_ADDR + \
(pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE + 4)
/* AEQ/CEQ control, consumer/producer index registers. */
#define SSS_CSR_AEQ_CTRL_0_ADDR (SSS_CSR_CFG_FLAG + 0x200)
#define SSS_CSR_AEQ_CTRL_1_ADDR (SSS_CSR_CFG_FLAG + 0x204)
#define SSS_CSR_AEQ_CI_ADDR (SSS_CSR_CFG_FLAG + 0x208)
#define SSS_CSR_AEQ_PI_ADDR (SSS_CSR_CFG_FLAG + 0x20C)
#define SSS_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (SSS_CSR_CFG_FLAG + 0x50)
#define SSS_CSR_CEQ_CTRL_0_ADDR (SSS_CSR_CFG_FLAG + 0x280)
#define SSS_CSR_CEQ_CTRL_1_ADDR (SSS_CSR_CFG_FLAG + 0x284)
#define SSS_CSR_CEQ_CI_ADDR (SSS_CSR_CFG_FLAG + 0x288)
#define SSS_CSR_CEQ_PI_ADDR (SSS_CSR_CFG_FLAG + 0x28c)
#define SSS_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (SSS_CSR_CFG_FLAG + 0x54)
/* ADM MSG registers */
#define SSS_CSR_ADM_MSG_BASE (SSS_MGMT_FLAG + 0x2000)
#define SSS_CSR_ADM_MSG_STRIDE 0x80
#define SSS_CSR_ADM_MSG_HEAD_HI_ADDR(id) \
(SSS_CSR_ADM_MSG_BASE + 0x0 + (id) * SSS_CSR_ADM_MSG_STRIDE)
#define SSS_CSR_ADM_MSG_HEAD_LO_ADDR(id) \
(SSS_CSR_ADM_MSG_BASE + 0x4 + (id) * SSS_CSR_ADM_MSG_STRIDE)
#define SSS_CSR_ADM_MSG_STATE_HI_ADDR(id) \
(SSS_CSR_ADM_MSG_BASE + 0x8 + (id) * SSS_CSR_ADM_MSG_STRIDE)
#define SSS_CSR_ADM_MSG_STATE_LO_ADDR(id) \
(SSS_CSR_ADM_MSG_BASE + 0xC + (id) * SSS_CSR_ADM_MSG_STRIDE)
#define SSS_CSR_ADM_MSG_NUM_ELEM_ADDR(id) \
(SSS_CSR_ADM_MSG_BASE + 0x10 + (id) * SSS_CSR_ADM_MSG_STRIDE)
#define SSS_CSR_ADM_MSG_CTRL_ADDR(id) \
(SSS_CSR_ADM_MSG_BASE + 0x14 + (id) * SSS_CSR_ADM_MSG_STRIDE)
#define SSS_CSR_ADM_MSG_PI_ADDR(id) \
(SSS_CSR_ADM_MSG_BASE + 0x1C + (id) * SSS_CSR_ADM_MSG_STRIDE)
#define SSS_CSR_ADM_MSG_REQ_ADDR(id) \
(SSS_CSR_ADM_MSG_BASE + 0x20 + (id) * SSS_CSR_ADM_MSG_STRIDE)
#define SSS_CSR_ADM_MSG_STATE_0_ADDR(id) \
(SSS_CSR_ADM_MSG_BASE + 0x30 + (id) * SSS_CSR_ADM_MSG_STRIDE)
/* self test register */
#define SSS_MGMT_HEALTH_STATUS_ADDR (SSS_MGMT_FLAG + 0x983c)
#define SSS_CHIP_BASE_INFO_ADDR (SSS_MGMT_FLAG + 0xB02C)
#define SSS_CHIP_ERR_STATUS0_ADDR (SSS_MGMT_FLAG + 0xC0EC)
#define SSS_CHIP_ERR_STATUS1_ADDR (SSS_MGMT_FLAG + 0xC0F0)
#define SSS_ERR_INFO0_ADDR (SSS_MGMT_FLAG + 0xC0F4)
#define SSS_ERR_INFO1_ADDR (SSS_MGMT_FLAG + 0xC0F8)
#define SSS_ERR_INFO2_ADDR (SSS_MGMT_FLAG + 0xC0FC)
#endif

View File

@ -0,0 +1,98 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_CTRLQ_INFO_H
#define SSS_CTRLQ_INFO_H
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include "sss_hw_mbx_msg.h"
#include "sss_hw_wq.h"
#include "sss_hw_ctrlq.h"
#define SSS_DEFAULT_WQ_PAGE_SIZE 0x100000
#define SSS_HW_WQ_PAGE_SIZE 0x1000
#define SSS_MAX_WQ_PAGE_NUM 8
/* ctrlq ack type */
enum sss_ack_type {
SSS_ACK_TYPE_CTRLQ,
SSS_ACK_TYPE_SHARE_CQN,
SSS_ACK_TYPE_APP_CQN,
SSS_MOD_ACK_MAX = 15,
};
enum sss_ctrlq_type {
SSS_CTRLQ_SYNC,
SSS_CTRLQ_ASYNC,
SSS_MAX_CTRLQ_TYPE = 4
};
/* Lifecycle state of a control-queue command slot. */
enum sss_ctrlq_msg_type {
SSS_MSG_TYPE_NONE,
SSS_MSG_TYPE_SET_ARM,
SSS_MSG_TYPE_DIRECT_RESP,
SSS_MSG_TYPE_SGE_RESP,
SSS_MSG_TYPE_ASYNC,
SSS_MSG_TYPE_PSEUDO_TIMEOUT,
SSS_MSG_TYPE_TIMEOUT,
SSS_MSG_TYPE_FORCE_STOP,
SSS_MSG_TYPE_MAX
};
/* Per-command tracking: completion, status pointers and in/out buffers. */
struct sss_ctrlq_cmd_info {
enum sss_ctrlq_msg_type msg_type;
u16 channel;
struct completion *done;
int *err_code;
int *cmpt_code;
u64 *direct_resp;
u64 msg_id;
struct sss_ctrl_msg_buf *in_buf;
struct sss_ctrl_msg_buf *out_buf;
};
/* One control queue: work queue ring plus per-entry command info. */
struct sss_ctrlq {
struct sss_wq wq;
enum sss_ctrlq_type ctrlq_type;
int wrapped;
/* spinlock for send ctrlq commands */
spinlock_t ctrlq_lock;
struct sss_ctrlq_ctxt_info ctrlq_ctxt;
struct sss_ctrlq_cmd_info *cmd_info;
void *hwdev;
};
struct sss_ctrlq_info {
void *hwdev;
/* NOTE(review): struct pci_pool was removed from mainline (replaced by
 * struct dma_pool); presumably mapped by the kcompat layer -- confirm.
 */
struct pci_pool *msg_buf_pool;
/* doorbell area */
u8 __iomem *db_base;
/* All ctrlq's CLA of a VF occupy a PAGE when ctrlq wq is 1-level CLA */
void *wq_block_vaddr;
dma_addr_t wq_block_paddr;
struct sss_ctrlq ctrlq[SSS_MAX_CTRLQ_TYPE];
u32 state;
u32 disable_flag;
u8 lock_channel_en;
u8 num;
u8 rsvd[6];
unsigned long channel_stop;
};
#endif

View File

@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_EQ_INFO_H
#define SSS_EQ_INFO_H
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include "sss_hw_common.h"
#include "sss_hw_irq.h"
#include "sss_hw_svc_cap.h"
#define SSS_EQ_IRQ_NAME_LEN 64
enum sss_eq_type {
SSS_AEQ,
SSS_CEQ
};
/* Fills in the hardware descriptors of a freshly allocated EQ. */
typedef void (*sss_init_desc_handler_t)(void *eq);
/* Programs EQ attributes into the chip; returns a hardware status value. */
typedef u32 (*sss_chip_init_attr_handler_t)(void *eq);
/* One event queue (async or completion), shared by AEQ and CEQ code. */
struct sss_eq {
char *name;
void *hwdev;
enum sss_eq_type type;
u32 page_size;
u32 old_page_size;
u32 len;
u32 ci;
u16 wrap;
u16 qid;
u16 entry_size;
u16 page_num;
u32 num_entry_per_pg;
struct sss_irq_desc irq_desc;
char irq_name[SSS_EQ_IRQ_NAME_LEN];
struct sss_dma_addr_align *page_array;
/* AEQs are serviced from a workqueue, CEQs from a tasklet. */
struct work_struct aeq_work;
struct tasklet_struct ceq_tasklet;
u64 hw_intr_jiffies;
u64 sw_intr_jiffies;
sss_init_desc_handler_t init_desc_handler;
sss_chip_init_attr_handler_t init_attr_handler;
irq_handler_t irq_handler;
};
struct sss_eq_cfg {
enum sss_service_type type;
int id;
int free; /* 1 - allocated, 0 - freed */
};
struct sss_eq_info {
struct sss_eq_cfg *eq;
u8 ceq_num;
u8 remain_ceq_num;
/* mutex used for allocate EQs */
struct mutex eq_mutex;
};
#endif

View File

@ -0,0 +1,273 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWDEV_H
#define SSS_HWDEV_H
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/timer.h>
#include "sss_hw_common.h"
#include "sss_hw_svc_cap.h"
#include "sss_hw_mbx_msg.h"
#include "sss_hw_statistics.h"
#include "sss_hw_event.h"
#include "sss_hwif.h"
#include "sss_mgmt_info.h"
#include "sss_ctrlq_info.h"
#include "sss_aeq_info.h"
#include "sss_ceq_info.h"
#include "sss_mbx_info.h"
#include "sss_mgmt_channel.h"
/* Channel-detect task period in milliseconds. */
#define SSSNIC_CHANNEL_DETECT_PERIOD (5 * 1000)
/* Role of this function's host in single- or multi-host deployments. */
enum sss_func_mode {
SSS_FUNC_MOD_MIN,
/* single host */
SSS_FUNC_MOD_NORMAL_HOST = SSS_FUNC_MOD_MIN,
/* multi host, bare-metal, sdi side */
SSS_FUNC_MOD_MULTI_BM_MASTER,
/* multi host, bare-metal, host side */
SSS_FUNC_MOD_MULTI_BM_SLAVE,
/* multi host, vm mode, sdi side */
SSS_FUNC_MOD_MULTI_VM_MASTER,
/* multi host, vm mode, host side */
SSS_FUNC_MOD_MULTI_VM_SLAVE,
SSS_FUNC_MOD_MAX = SSS_FUNC_MOD_MULTI_VM_SLAVE,
};
/* Virtual/physical address pair for one page. */
struct sss_page_addr {
void *virt_addr;
u64 phys_addr;
};
struct sss_mqm_addr_trans_tbl_info {
u32 chunk_num;
u32 search_gpa_num;
u32 page_size;
u32 page_num;
struct sss_page_addr *brm_srch_page_addr;
};
struct sss_devlink {
void *hwdev;
u8 active_cfg_id; /* 1 ~ 8 */
u8 switch_cfg_id; /* 1 ~ 8 */
};
/* Heartbeat/keepalive state towards the management CPU. */
struct sss_heartbeat {
u8 pcie_link_down;
u8 heartbeat_lost;
u16 rsvd;
u32 pcie_link_down_cnt;
struct timer_list heartbeat_timer;
struct work_struct lost_work;
};
/* AEQ servicing statistics (busy/receive counters). */
struct sss_aeq_stat {
u16 busy_cnt;
u16 rsvd;
u64 cur_recv_cnt;
u64 last_recv_cnt;
};
/* CLP channel context: message buffer serialized by clp_msg_lock. */
struct sss_clp_pf_to_mgmt {
struct semaphore clp_msg_lock;
void *clp_msg_buf;
};
/* Central per-device object of the hardware layer; owns all management
 * channels (mailbox, ADM, control queue), event queues and capability info.
 */
struct sss_hwdev {
void *adapter_hdl; /* pointer to sss_pci_adapter or NDIS_Adapter */
void *pcidev_hdl; /* pointer to pcidev or Handler */
/* pointer to pcidev->dev or Handler, for
 * sdk_err() or dma_alloc()
 */
void *dev_hdl;
void *chip_node;
void *service_adapter[SSS_SERVICE_TYPE_MAX];
u32 wq_page_size;
int chip_present_flag;
u8 poll; /* use polling mode or int mode */
u8 rsvd[3];
struct sss_hwif *hwif; /* include void __iomem *bar */
struct sss_comm_global_attr glb_attr;
u64 features[SSS_MAX_FEATURE_QWORD];
struct sss_mgmt_info *mgmt_info;
struct sss_ctrlq_info *ctrlq_info;
struct sss_aeq_info *aeq_info;
struct sss_ceq_info *ceq_info;
struct sss_mbx *mbx; /* mailbox channel */
struct sss_msg_pf_to_mgmt *pf_to_mgmt; /* ADM channel */
struct sss_clp_pf_to_mgmt *clp_pf_to_mgmt;
struct sss_hw_stats hw_stats;
u8 *chip_fault_stats;
sss_event_handler_t event_handler;
void *event_handler_data;
struct sss_board_info board_info;
struct delayed_work sync_time_task;
struct delayed_work channel_detect_task;
struct workqueue_struct *workq;
struct sss_heartbeat heartbeat;
ulong func_state;
spinlock_t channel_lock; /* protect channel init and deinit */
struct sss_devlink *devlink_dev;
enum sss_func_mode func_mode;
struct sss_aeq_stat aeq_stat;
u16 aeq_busy_cnt;
};
/* Accessor macros: the argument must be (or wrap) a struct sss_hwdev *.
 * NOTE(review): several macros cast the bare argument without surrounding
 * parentheses ('(struct sss_hwdev *)hwdev'); safe for the plain-pointer
 * arguments used today, but fragile for expression arguments -- confirm
 * before passing anything other than a simple identifier.
 */
#define SSS_TO_HWDEV(ptr) ((struct sss_hwdev *)(ptr)->hwdev)
#define SSS_TO_DEV(hwdev) (((struct sss_hwdev *)hwdev)->dev_hdl)
#define SSS_TO_HWIF(hwdev) (((struct sss_hwdev *)hwdev)->hwif)
#define SSS_TO_MGMT_INFO(hwdev) (((struct sss_hwdev *)hwdev)->mgmt_info)
#define SSS_TO_AEQ_INFO(hwdev) (((struct sss_hwdev *)hwdev)->aeq_info)
#define SSS_TO_CEQ_INFO(hwdev) (((struct sss_hwdev *)hwdev)->ceq_info)
#define SSS_TO_CTRLQ_INFO(hwdev) (((struct sss_hwdev *)hwdev)->ctrlq_info)
#define SSS_TO_IRQ_INFO(hwdev) (&((struct sss_hwdev *)hwdev)->mgmt_info->irq_info)
#define SSS_TO_SVC_CAP(hwdev) (&(((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap))
#define SSS_TO_NIC_CAP(hwdev) (&(((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.nic_cap))
#define SSS_TO_MAX_SQ_NUM(hwdev) \
(((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.nic_cap.max_sq)
#define SSS_TO_PHY_PORT_ID(hwdev) (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.port_id)
#define SSS_TO_MAX_VF_NUM(hwdev) (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.max_vf)
#define SSS_TO_FUNC_COS_BITMAP(hwdev) \
(((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.cos_valid_bitmap)
#define SSS_TO_PORT_COS_BITMAP(hwdev) \
(((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.port_cos_valid_bitmap)
/* Bit positions of the per-service capability flags.
 * NOTE(review): 'servic' is a typo for 'service'; the name is part of the
 * header's interface and left unchanged here.
 */
enum sss_servic_bit_define {
SSS_SERVICE_BIT_NIC = 0,
SSS_SERVICE_BIT_ROCE = 1,
SSS_SERVICE_BIT_VBS = 2,
SSS_SERVICE_BIT_TOE = 3,
SSS_SERVICE_BIT_IPSEC = 4,
SSS_SERVICE_BIT_FC = 5,
SSS_SERVICE_BIT_VIRTIO = 6,
SSS_SERVICE_BIT_OVS = 7,
SSS_SERVICE_BIT_NVME = 8,
SSS_SERVICE_BIT_ROCEAA = 9,
SSS_SERVICE_BIT_CURRENET = 10,
SSS_SERVICE_BIT_PPA = 11,
SSS_SERVICE_BIT_MIGRATE = 12,
SSS_MAX_SERVICE_BIT
};
/* Service-enable masks derived from the bit positions above. */
#define SSS_CFG_SERVICE_MASK_NIC (0x1 << SSS_SERVICE_BIT_NIC)
#define SSS_CFG_SERVICE_MASK_ROCE (0x1 << SSS_SERVICE_BIT_ROCE)
#define SSS_CFG_SERVICE_MASK_VBS (0x1 << SSS_SERVICE_BIT_VBS)
#define SSS_CFG_SERVICE_MASK_TOE (0x1 << SSS_SERVICE_BIT_TOE)
#define SSS_CFG_SERVICE_MASK_IPSEC (0x1 << SSS_SERVICE_BIT_IPSEC)
#define SSS_CFG_SERVICE_MASK_FC (0x1 << SSS_SERVICE_BIT_FC)
#define SSS_CFG_SERVICE_MASK_VIRTIO (0x1 << SSS_SERVICE_BIT_VIRTIO)
#define SSS_CFG_SERVICE_MASK_OVS (0x1 << SSS_SERVICE_BIT_OVS)
#define SSS_CFG_SERVICE_MASK_NVME (0x1 << SSS_SERVICE_BIT_NVME)
#define SSS_CFG_SERVICE_MASK_ROCEAA (0x1 << SSS_SERVICE_BIT_ROCEAA)
#define SSS_CFG_SERVICE_MASK_CURRENET (0x1 << SSS_SERVICE_BIT_CURRENET)
#define SSS_CFG_SERVICE_MASK_PPA (0x1 << SSS_SERVICE_BIT_PPA)
#define SSS_CFG_SERVICE_MASK_MIGRATE (0x1 << SSS_SERVICE_BIT_MIGRATE)
#define SSS_CFG_SERVICE_RDMA_EN SSS_CFG_SERVICE_MASK_ROCE
/* Service-type predicates; dev must be a struct sss_hwdev *. */
#define SSS_IS_NIC_TYPE(dev) \
(((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_NIC)
#define SSS_IS_ROCE_TYPE(dev) \
(((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_ROCE)
#define SSS_IS_VBS_TYPE(dev) \
(((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_VBS)
#define SSS_IS_TOE_TYPE(dev) \
(((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_TOE)
#define SSS_IS_IPSEC_TYPE(dev) \
(((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_IPSEC)
#define SSS_IS_FC_TYPE(dev) \
(((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_FC)
#define SSS_IS_OVS_TYPE(dev) \
(((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_OVS)
#define SSS_IS_RDMA_TYPE(dev) \
(((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_RDMA_EN)
#define SSS_IS_RDMA_ENABLE(dev) \
((dev)->mgmt_info->svc_cap.sf_svc_attr.rdma_en)
#define SSS_IS_PPA_TYPE(dev) \
(((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_PPA)
#define SSS_IS_MIGR_TYPE(dev) \
(((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_MIGRATE)
#define SSS_MAX_HOST_NUM(hwdev) ((hwdev)->glb_attr.max_host_num)
#define SSS_MAX_PF_NUM(hwdev) ((hwdev)->glb_attr.max_pf_num)
#define SSS_MGMT_CPU_NODE_ID(hwdev) \
((hwdev)->glb_attr.mgmt_host_node_id)
/* Function-type and host-mode predicates. */
#define SSS_GET_FUNC_TYPE(hwdev) ((hwdev)->hwif->attr.func_type)
#define SSS_IS_PF(dev) (SSS_GET_FUNC_TYPE(dev) == SSS_FUNC_TYPE_PF)
#define SSS_IS_VF(dev) (SSS_GET_FUNC_TYPE(dev) == SSS_FUNC_TYPE_VF)
#define SSS_IS_PPF(dev) \
(SSS_GET_FUNC_TYPE(dev) == SSS_FUNC_TYPE_PPF)
#define SSS_GET_FUNC_ID(hwdev) ((hwdev)->hwif->attr.func_id)
#define SSS_IS_BMGW_MASTER_HOST(hwdev) \
((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_BM_MASTER)
#define SSS_IS_BMGW_SLAVE_HOST(hwdev) \
((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_BM_SLAVE)
#define SSS_IS_VM_MASTER_HOST(hwdev) \
((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_VM_MASTER)
#define SSS_IS_VM_SLAVE_HOST(hwdev) \
((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_VM_SLAVE)
#define SSS_IS_MASTER_HOST(hwdev) \
(SSS_IS_BMGW_MASTER_HOST(hwdev) || SSS_IS_VM_MASTER_HOST(hwdev))
#define SSS_IS_SLAVE_HOST(hwdev) \
(SSS_IS_BMGW_SLAVE_HOST(hwdev) || SSS_IS_VM_SLAVE_HOST(hwdev))
#define SSS_IS_MULTI_HOST(hwdev) \
(SSS_IS_BMGW_MASTER_HOST(hwdev) || SSS_IS_BMGW_SLAVE_HOST(hwdev) || \
SSS_IS_VM_MASTER_HOST(hwdev) || SSS_IS_VM_SLAVE_HOST(hwdev))
#define SSS_SPU_HOST_ID 4
/* Feature-bit tests against features[0] negotiated with the mgmt CPU. */
#define SSS_SUPPORT_ADM_MSG(hwdev) ((hwdev)->features[0] & SSS_COMM_F_ADM)
/* True when mailbox segmentation applies (function sits on the SPU host).
 * The argument is parenthesized -- '(hwdev)->hwif' -- for macro-argument
 * safety (CERT PRE01-C), matching every sibling macro in this header; the
 * original expanded bare 'hwdev->hwif'.
 */
#define SSS_SUPPORT_MBX_SEGMENT(hwdev) \
(SSS_GET_HWIF_PCI_INTF_ID((hwdev)->hwif) == SSS_SPU_HOST_ID)
/* Feature-bit tests against features[0] negotiated with the mgmt CPU. */
#define SSS_SUPPORT_CTRLQ_NUM(hwdev) \
((hwdev)->features[0] & SSS_COMM_F_CTRLQ_NUM)
#define SSS_SUPPORT_VIRTIO_VQ_SIZE(hwdev) \
((hwdev)->features[0] & SSS_COMM_F_VIRTIO_VQ_SIZE)
#define SSS_SUPPORT_CHANNEL_DETECT(hwdev) \
((hwdev)->features[0] & SSS_COMM_F_CHANNEL_DETECT)
#define SSS_SUPPORT_CLP(hwdev) \
((hwdev)->features[0] & SSS_COMM_F_CLP)
/* Allocation state of a configuration entry. */
enum {
SSS_CFG_FREE = 0,
SSS_CFG_BUSY = 1
};
#endif

View File

@ -0,0 +1,103 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWIF_H
#define SSS_HWIF_H
#include <linux/types.h>
#include <linux/spinlock.h>
/* Bitmap allocator for doorbell pages. */
struct sss_db_pool {
unsigned long *bitmap;
u32 bit_size;
/* spinlock for allocating doorbell area */
spinlock_t id_lock;
};
/* Per-function attributes read from the chip's HW-attribute registers.
 * NOTE(review): enum sss_func_type is not declared in this header; it must
 * come from a header included before this one -- confirm include order.
 */
struct sss_func_attr {
enum sss_func_type func_type;
u16 func_id;
u8 pf_id;
u8 pci_intf_id;
u16 global_vf_off;
u8 mpf_id;
u8 ppf_id;
u16 irq_num; /* max: 2 ^ 15 */
u8 aeq_num; /* max: 2 ^ 3 */
u8 ceq_num; /* max: 2 ^ 7 */
u16 sq_num; /* max: 2 ^ 8 */
u8 dma_attr_num; /* max: 2 ^ 6 */
u8 msix_flex_en;
};
/* Hardware interface: mapped BAR regions, doorbell pool and attributes. */
struct sss_hwif {
u8 __iomem *cfg_reg_base;
u8 __iomem *mgmt_reg_base;
u64 db_base_paddr;
u64 db_dwqe_len;
u8 __iomem *db_base_vaddr;
void *pdev;
struct sss_db_pool db_pool;
struct sss_func_attr attr;
};
/* Field accessors; hwif must be a struct sss_hwif *. */
#define SSS_GET_HWIF_AEQ_NUM(hwif) ((hwif)->attr.aeq_num)
#define SSS_GET_HWIF_CEQ_NUM(hwif) ((hwif)->attr.ceq_num)
#define SSS_GET_HWIF_IRQ_NUM(hwif) ((hwif)->attr.irq_num)
#define SSS_GET_HWIF_GLOBAL_ID(hwif) ((hwif)->attr.func_id)
#define SSS_GET_HWIF_PF_ID(hwif) ((hwif)->attr.pf_id)
#define SSS_GET_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_off)
#define SSS_GET_HWIF_PPF_ID(hwif) ((hwif)->attr.ppf_id)
#define SSS_GET_HWIF_MPF_ID(hwif) ((hwif)->attr.mpf_id)
#define SSS_GET_HWIF_PCI_INTF_ID(hwif) ((hwif)->attr.pci_intf_id)
#define SSS_GET_HWIF_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
#define SSS_GET_HWIF_MSIX_EN(hwif) ((hwif)->attr.msix_flex_en)
#define SSS_SET_HWIF_AEQ_NUM(hwif, val) \
((hwif)->attr.aeq_num = (val))
#define SSS_SET_HWIF_CEQ_NUM(hwif, val) \
((hwif)->attr.ceq_num = (val))
#define SSS_SET_HWIF_IRQ_NUM(hwif, val) \
((hwif)->attr.irq_num = (val))
#define SSS_SET_HWIF_GLOBAL_ID(hwif, val) \
((hwif)->attr.func_id = (val))
#define SSS_SET_HWIF_PF_ID(hwif, val) \
((hwif)->attr.pf_id = (val))
#define SSS_SET_HWIF_GLOBAL_VF_OFFSET(hwif, val) \
((hwif)->attr.global_vf_off = (val))
#define SSS_SET_HWIF_PPF_ID(hwif, val) \
((hwif)->attr.ppf_id = (val))
#define SSS_SET_HWIF_MPF_ID(hwif, val) \
((hwif)->attr.mpf_id = (val))
#define SSS_SET_HWIF_PCI_INTF_ID(hwif, val) \
((hwif)->attr.pci_intf_id = (val))
#define SSS_SET_HWIF_FUNC_TYPE(hwif, val) \
((hwif)->attr.func_type = (val))
#define SSS_SET_HWIF_DMA_ATTR_NUM(hwif, val) \
((hwif)->attr.dma_attr_num = (val))
#define SSS_SET_HWIF_MSIX_EN(hwif, val) \
((hwif)->attr.msix_flex_en = (val))
#define SSS_SET_HWIF_SQ_NUM(hwif, val) \
((hwif)->attr.sq_num = (val))
#endif

View File

@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_IRQ_INFO_H
#define SSS_IRQ_INFO_H
#include <linux/types.h>
#include <linux/mutex.h>
#include "sss_hw_svc_cap.h"
#include "sss_hw_irq.h"
/* One interrupt vector and the service it is allocated to. */
struct sss_irq {
enum sss_service_type type;
int busy; /* 1 - allocated, 0 - freed */
struct sss_irq_desc desc;
};
/* IRQ vector pool for one device. */
struct sss_irq_info {
struct sss_irq *irq;
u16 total_num;
u16 free_num;
u16 max_num; /* device max irq number */
struct mutex irq_mutex; /* protects irq allocation and free */
};
#endif

View File

@ -0,0 +1,110 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_MBX_INFO_H
#define SSS_MBX_INFO_H
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include "sss_hw_mbx.h"
enum sss_mbx_event_state {
SSS_EVENT_START = 0,
SSS_EVENT_FAIL,
SSS_EVENT_SUCCESS,
SSS_EVENT_TIMEOUT,
SSS_EVENT_END,
};
struct sss_mbx_send {
u8 *data;
u64 *wb_state; /* write back status */
void *wb_vaddr;
dma_addr_t wb_paddr;
};
struct sss_mbx_dma_queue {
void *dma_buff_vaddr;
dma_addr_t dma_buff_paddr;
u16 depth;
u16 pi;
u16 ci;
};
struct sss_mbx_msg_info {
u8 msg_id;
u8 state; /* can only use 1 bit */
};
struct sss_msg_desc {
void *msg;
u16 msg_len;
u8 seq_id;
u8 mod;
u16 cmd;
struct sss_mbx_msg_info msg_info;
};
struct sss_msg_buffer {
struct sss_msg_desc resp_msg;
struct sss_msg_desc recv_msg;
atomic_t recv_msg_cnt;
};
struct sss_mbx {
void *hwdev;
u8 lock_channel_en;
u8 rsvd0[3];
unsigned long channel_stop;
/* lock for send mbx message and ack message */
struct mutex mbx_send_lock;
/* lock for send mbx message */
struct mutex msg_send_lock;
struct sss_mbx_send mbx_send;
struct sss_mbx_dma_queue sync_msg_queue;
struct sss_mbx_dma_queue async_msg_queue;
struct workqueue_struct *workq;
struct sss_msg_buffer mgmt_msg; /* driver and MGMT CPU */
struct sss_msg_buffer *host_msg; /* PPF message between hosts */
struct sss_msg_buffer *func_msg; /* PF to VF or VF to PF */
u16 num_func_msg;
u16 cur_msg_channel;
u8 support_h2h_msg; /* host to host */
u8 rsvd1[3];
/* vf receive pf/ppf callback */
sss_vf_mbx_handler_t vf_mbx_cb[SSS_MOD_TYPE_MAX];
void *vf_mbx_data[SSS_MOD_TYPE_MAX];
/* pf/ppf receive vf callback */
sss_pf_mbx_handler_t pf_mbx_cb[SSS_MOD_TYPE_MAX];
void *pf_mbx_data[SSS_MOD_TYPE_MAX];
/* ppf receive pf/ppf callback */
sss_ppf_mbx_handler_t ppf_mbx_cb[SSS_MOD_TYPE_MAX];
void *ppf_mbx_data[SSS_MOD_TYPE_MAX];
/* pf receive ppf callback */
sss_pf_from_ppf_mbx_handler_t pf_recv_ppf_mbx_cb[SSS_MOD_TYPE_MAX];
void *pf_recv_ppf_mbx_data[SSS_MOD_TYPE_MAX];
unsigned long ppf_to_pf_mbx_cb_state[SSS_MOD_TYPE_MAX];
unsigned long ppf_mbx_cb_state[SSS_MOD_TYPE_MAX];
unsigned long pf_mbx_cb_state[SSS_MOD_TYPE_MAX];
unsigned long vf_mbx_cb_state[SSS_MOD_TYPE_MAX];
enum sss_mbx_event_state event_flag;
/* lock for mbx event flag */
spinlock_t mbx_lock;
u8 send_msg_id;
u8 rsvd2[3];
};
#endif

View File

@ -0,0 +1,141 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_MGMT_CHANNEL_H
#define SSS_MGMT_CHANNEL_H
#include <linux/types.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include "sss_hw_mbx.h"
#include "sss_hw_mgmt.h"
#include "sss_adm_info.h"
/* message header define */
#define SSS_MSG_HEADER_SRC_GLB_FUNC_ID_SHIFT 0
#define SSS_MSG_HEADER_STATUS_SHIFT 13
#define SSS_MSG_HEADER_SOURCE_SHIFT 15
#define SSS_MSG_HEADER_AEQ_ID_SHIFT 16
#define SSS_MSG_HEADER_MSG_ID_SHIFT 18
#define SSS_MSG_HEADER_CMD_SHIFT 22
#define SSS_MSG_HEADER_MSG_LEN_SHIFT 32
#define SSS_MSG_HEADER_MODULE_SHIFT 43
#define SSS_MSG_HEADER_SEG_LEN_SHIFT 48
#define SSS_MSG_HEADER_NO_ACK_SHIFT 54
#define SSS_MSG_HEADER_DATA_TYPE_SHIFT 55
#define SSS_MSG_HEADER_SEQID_SHIFT 56
#define SSS_MSG_HEADER_LAST_SHIFT 62
#define SSS_MSG_HEADER_DIRECTION_SHIFT 63
#define SSS_MSG_HEADER_SRC_GLB_FUNC_ID_MASK 0x1FFF
#define SSS_MSG_HEADER_STATUS_MASK 0x1
#define SSS_MSG_HEADER_SOURCE_MASK 0x1
#define SSS_MSG_HEADER_AEQ_ID_MASK 0x3
#define SSS_MSG_HEADER_MSG_ID_MASK 0xF
#define SSS_MSG_HEADER_CMD_MASK 0x3FF
#define SSS_MSG_HEADER_MSG_LEN_MASK 0x7FF
#define SSS_MSG_HEADER_MODULE_MASK 0x1F
#define SSS_MSG_HEADER_SEG_LEN_MASK 0x3F
#define SSS_MSG_HEADER_NO_ACK_MASK 0x1
#define SSS_MSG_HEADER_DATA_TYPE_MASK 0x1
#define SSS_MSG_HEADER_SEQID_MASK 0x3F
#define SSS_MSG_HEADER_LAST_MASK 0x1
#define SSS_MSG_HEADER_DIRECTION_MASK 0x1
#define SSS_GET_MSG_HEADER(val, field) \
(((val) >> SSS_MSG_HEADER_##field##_SHIFT) & \
SSS_MSG_HEADER_##field##_MASK)
#define SSS_SET_MSG_HEADER(val, field) \
((u64)(((u64)(val)) & SSS_MSG_HEADER_##field##_MASK) << \
SSS_MSG_HEADER_##field##_SHIFT)
enum sss_msg_ack_type {
SSS_MSG_ACK,
SSS_MSG_NO_ACK,
};
enum sss_data_type {
SSS_INLINE_DATA = 0,
SSS_DMA_DATA = 1,
};
enum sss_msg_seg_type {
SSS_NOT_LAST_SEG = 0,
SSS_LAST_SEG = 1,
};
enum sss_msg_direction_type {
SSS_DIRECT_SEND_MSG = 0,
SSS_RESP_MSG = 1,
};
enum sss_msg_src_type {
SSS_MSG_SRC_MGMT = 0,
SSS_MSG_SRC_MBX = 1,
};
enum sss_mgmt_msg_cb_t_state {
SSS_CALLBACK_REG = 0,
SSS_CALLBACK_RUNNING,
};
enum sss_pf_to_mgmt_event_state {
SSS_ADM_EVENT_UNINIT = 0,
SSS_ADM_EVENT_START,
SSS_ADM_EVENT_SUCCESS,
SSS_ADM_EVENT_FAIL,
SSS_ADM_EVENT_TIMEOUT,
SSS_ADM_EVENT_END,
};
struct sss_recv_msg {
void *buf;
u16 buf_len;
u16 cmd;
u16 msg_id;
u8 seq_id;
u8 no_ack;
enum sss_mod_type mod;
struct completion done;
};
struct sss_msg_pf_to_mgmt {
void *hwdev;
spinlock_t async_msg_lock; /* protect msg async and sync */
struct semaphore sync_lock;
struct workqueue_struct *workq;
void *async_msg_buf;
void *sync_buf;
void *ack_buf;
struct sss_recv_msg recv_msg;
struct sss_recv_msg recv_resp_msg;
u16 rsvd;
u16 async_msg_id;
u16 sync_msg_id;
struct sss_adm_msg *adm_msg[SSS_ADM_MSG_MAX];
sss_mgmt_msg_handler_t recv_handler[SSS_MOD_TYPE_HW_MAX];
void *recv_data[SSS_MOD_TYPE_HW_MAX];
unsigned long recv_handler_state[SSS_MOD_TYPE_HW_MAX];
void *async_msg_cb_data[SSS_MOD_TYPE_HW_MAX];
/* lock when sending msg */
spinlock_t sync_event_lock; /* protect event async and sync */
enum sss_pf_to_mgmt_event_state event_state;
};
#endif

View File

@ -0,0 +1,123 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_MGMT_INFO_H
#define SSS_MGMT_INFO_H
#include <linux/types.h>
#include "sss_hw_svc_cap.h"
#include "sss_eq_info.h"
#include "sss_irq_info.h"
struct sss_dev_sf_svc_attr {
u8 rdma_en;
u8 rsvd[3];
};
enum sss_intr_type {
SSS_INTR_TYPE_MSIX,
SSS_INTR_TYPE_MSI,
SSS_INTR_TYPE_INT,
SSS_INTR_TYPE_NONE,
/* PXE,OVS need single thread processing,
* synchronization messages must use poll wait mechanism interface
*/
};
/* device service capability */
struct sss_service_cap {
struct sss_dev_sf_svc_attr sf_svc_attr;
u16 svc_type; /* user input service type */
u16 chip_svc_type; /* HW supported service type, reference to sss_servic_bit_define */
u8 host_id;
u8 ep_id;
u8 er_id; /* PF/VF's ER */
u8 port_id; /* PF/VF's physical port */
/* Host global resources */
u16 host_total_function;
u8 pf_num;
u8 pf_id_start;
u16 vf_num; /* max numbers of vf in current host */
u16 vf_id_start;
u8 host_oq_id_mask_val;
u8 host_valid_bitmap;
u8 master_host_id;
u8 srv_multi_host_mode;
u8 timer_pf_num;
u8 timer_pf_id_start;
u16 timer_vf_num;
u16 timer_vf_id_start;
u8 flexq_en;
u8 resvd;
u8 cos_valid_bitmap;
u8 port_cos_valid_bitmap;
u16 max_vf; /* max VF number that PF supported */
u16 pseudo_vf_start_id;
u16 pseudo_vf_num;
u32 pseudo_vf_max_pctx;
u16 pseudo_vf_bfilter_start_addr;
u16 pseudo_vf_bfilter_len;
u16 pseudo_vf_cfg_num;
u16 virtio_vq_size;
/* DO NOT get interrupt_type from firmware */
enum sss_intr_type intr_type;
u8 sf_en; /* stateful business status */
u8 timer_en; /* 0:disable, 1:enable */
u8 bloomfilter_en; /* 0:disable, 1:enable */
u8 lb_mode;
u8 smf_pg;
u8 rsvd[3];
u32 max_connect_num; /* PF/VF maximum connection number(1M) */
/* The maximum connections which can be stick to cache memory, max 1K */
u16 max_stick2cache_num;
/* Starting address in cache memory for bloom filter, 64Bytes aligned */
u16 bfilter_start_addr;
/* Length for bloom filter, aligned on 64Bytes. The size is length*64B.
* Bloom filter memory size + 1 must be power of 2.
* The maximum memory size of bloom filter is 4M
*/
u16 bfilter_len;
/* The size of hash bucket tables, align on 64 entries.
* Be used to AND (&) the hash value. Bucket Size +1 must be power of 2.
* The maximum number of hash bucket is 4M
*/
u16 hash_bucket_num;
struct sss_nic_service_cap nic_cap; /* NIC capability */
struct sss_rdma_service_cap rdma_cap; /* RDMA capability */
struct sss_fc_service_cap fc_cap; /* FC capability */
struct sss_toe_service_cap toe_cap; /* ToE capability */
struct sss_ovs_service_cap ovs_cap; /* OVS capability */
struct sss_ipsec_service_cap ipsec_cap; /* IPsec capability */
struct sss_ppa_service_cap ppa_cap; /* PPA capability */
struct sss_vbs_service_cap vbs_cap; /* VBS capability */
};
struct sss_svc_cap_info {
u32 func_id;
struct sss_service_cap cap;
};
struct sss_mgmt_info {
void *hwdev;
struct sss_service_cap svc_cap;
struct sss_eq_info eq_info; /* CEQ */
struct sss_irq_info irq_info; /* IRQ */
u32 func_seq_num; /* temporary */
};
#endif

View File

@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_SRIOV_INFO_H
#define SSS_SRIOV_INFO_H
#include <linux/types.h>
enum sss_sriov_state {
SSS_SRIOV_DISABLE,
SSS_SRIOV_ENABLE,
SSS_SRIOV_PRESENT,
};
struct sss_sriov_info {
u8 enabled;
u8 rsvd[3];
unsigned int vf_num;
unsigned long state;
};
#endif

View File

@ -0,0 +1,724 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <net/addrconf.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/debugfs.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_pci_sriov.h"
#include "sss_pci_id_tbl.h"
#include "sss_adapter.h"
#include "sss_adapter_mgmt.h"
#include "sss_pci_global.h"
#include "sss_tool_comm.h"
#include "sss_hw_export.h"
#include "sss_tool_hw.h"
#include "sss_tool.h"
#ifndef SSS_PF_NUM_MAX
#define SSS_PF_NUM_MAX (16)
#endif
#define SSS_ADAPTER_CNT_TIMEOUT 10000
#define SSS_WAIT_ADAPTER_USLEEP_MIN 9900
#define SSS_WAIT_ADAPTER_USLEEP_MAX 10000
#define SSS_CHIP_NODE_HOLD_TIMEOUT (10 * 60 * 1000)
#define SSS_WAIT_CHIP_NODE_CHANGED (10 * 60 * 1000)
#define SSS_PRINT_TIMEOUT_INTERVAL 10000
#define SSS_MICRO_SECOND 1000
#define SSS_CHIP_NODE_USLEEP_MIN 900
#define SSS_CHIP_NODE_USLEEP_MAX 1000
#define SSS_CARD_CNT_MAX 64
#define SSS_IS_SPU_DEV(pdev) ((pdev)->device == SSS_DEV_ID_SPU)
enum sss_node_state {
SSS_NODE_CHANGE = BIT(0),
};
struct sss_chip_node_lock {
struct mutex chip_mutex; /* lock for chip list */
unsigned long state;
atomic_t ref_cnt;
};
static struct sss_chip_node_lock g_chip_node_lock;
static unsigned long g_index_bit_map;
LIST_HEAD(g_chip_list);
struct list_head *sss_get_chip_list(void)
{
return &g_chip_list;
}
void lld_dev_hold(struct sss_hal_dev *dev)
{
struct sss_pci_adapter *pci_adapter = pci_get_drvdata(dev->pdev);
atomic_inc(&pci_adapter->ref_cnt);
}
void lld_dev_put(struct sss_hal_dev *dev)
{
struct sss_pci_adapter *pci_adapter = pci_get_drvdata(dev->pdev);
atomic_dec(&pci_adapter->ref_cnt);
}
/**
 * sss_chip_node_lock() - Enter the exclusive "chip list changing" state.
 *
 * Two-phase acquisition under chip_mutex:
 *  1. Claim the SSS_NODE_CHANGE bit so that readers arriving later will
 *     block in sss_hold_chip_node().
 *  2. Wait for the reader reference count to drain to zero.
 * Each phase polls for up to SSS_WAIT_CHIP_NODE_CHANGED ms and only warns
 * on timeout; the caller proceeds either way. Pair with
 * sss_chip_node_unlock().
 */
void sss_chip_node_lock(void)
{
	unsigned long end;
	bool timeout = true;
	u32 loop_cnt;

	mutex_lock(&g_chip_node_lock.chip_mutex);

	loop_cnt = 0;
	end = jiffies + msecs_to_jiffies(SSS_WAIT_CHIP_NODE_CHANGED);
	do {
		/* test_and_set_bit() returns nonzero while another writer holds the bit */
		if (!test_and_set_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) {
			timeout = false;
			break;
		}
		loop_cnt++;
		if (loop_cnt % SSS_PRINT_TIMEOUT_INTERVAL == 0)
			pr_warn("Wait for adapter change complete for %us\n",
				loop_cnt / SSS_MICRO_SECOND);
		/* if sleep 1ms, use usleep_range to be more precise */
		usleep_range(SSS_CHIP_NODE_USLEEP_MIN, SSS_CHIP_NODE_USLEEP_MAX);
	} while (time_before(jiffies, end));

	/* Timed out: force-claim the bit anyway so unlock stays balanced */
	if (timeout && test_and_set_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state))
		pr_warn("Wait for adapter change complete timeout when trying to get adapter lock\n");

	loop_cnt = 0;
	timeout = true;
	end = jiffies + msecs_to_jiffies(SSS_WAIT_CHIP_NODE_CHANGED);
	do {
		/* wait until every sss_hold_chip_node() reader has dropped its ref */
		if (!atomic_read(&g_chip_node_lock.ref_cnt)) {
			timeout = false;
			break;
		}
		loop_cnt++;
		if (loop_cnt % SSS_PRINT_TIMEOUT_INTERVAL == 0)
			pr_warn("Wait for adapter unused for %us, reference count: %d\n",
				loop_cnt / SSS_MICRO_SECOND,
				atomic_read(&g_chip_node_lock.ref_cnt));
		usleep_range(SSS_CHIP_NODE_USLEEP_MIN,
			     SSS_CHIP_NODE_USLEEP_MAX);
	} while (time_before(jiffies, end));

	if (timeout && atomic_read(&g_chip_node_lock.ref_cnt))
		pr_warn("Wait for adapter unused timeout\n");

	mutex_unlock(&g_chip_node_lock.chip_mutex);
}
void sss_chip_node_unlock(void)
{
clear_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state);
}
/**
 * sss_hold_chip_node() - Take a reader reference on the global chip list.
 *
 * Waits (up to SSS_CHIP_NODE_HOLD_TIMEOUT ms) for any in-progress list
 * change (SSS_NODE_CHANGE bit) to finish, then bumps the reference count
 * that sss_chip_node_lock() drains. On timeout it only warns and takes
 * the reference anyway. Pair with sss_put_chip_node().
 */
void sss_hold_chip_node(void)
{
	unsigned long end;
	u32 loop_cnt = 0;

	mutex_lock(&g_chip_node_lock.chip_mutex);

	end = jiffies + msecs_to_jiffies(SSS_CHIP_NODE_HOLD_TIMEOUT);
	do {
		/* a clear bit means no writer is currently mutating the list */
		if (!test_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state))
			break;
		loop_cnt++;
		if (loop_cnt % SSS_PRINT_TIMEOUT_INTERVAL == 0)
			pr_warn("Wait adapter change complete for %us\n",
				loop_cnt / SSS_MICRO_SECOND);
		/* if sleep 1ms, use usleep_range to be more precise */
		usleep_range(SSS_CHIP_NODE_USLEEP_MIN, SSS_CHIP_NODE_USLEEP_MAX);
	} while (time_before(jiffies, end));

	if (test_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state))
		pr_warn("Wait adapter change complete timeout when trying to adapter dev\n");

	/* register this reader before releasing the mutex */
	atomic_inc(&g_chip_node_lock.ref_cnt);
	mutex_unlock(&g_chip_node_lock.chip_mutex);
}
void sss_put_chip_node(void)
{
atomic_dec(&g_chip_node_lock.ref_cnt);
}
void sss_pre_init(void)
{
mutex_init(&g_chip_node_lock.chip_mutex);
atomic_set(&g_chip_node_lock.ref_cnt, 0);
sss_init_uld_lock();
}
/**
 * sss_get_adapter_by_pcidev() - Look up the adapter bound to a PCI device.
 * @pdev: PCI device, may be NULL.
 *
 * Return: the adapter stored as PCI driver data, or NULL when @pdev is
 * NULL (or nothing has been bound yet).
 */
struct sss_pci_adapter *sss_get_adapter_by_pcidev(struct pci_dev *pdev)
{
	/* Validate before use: the original called pci_get_drvdata(pdev)
	 * first, dereferencing pdev and defeating the NULL check below.
	 */
	if (!pdev)
		return NULL;

	return pci_get_drvdata(pdev);
}
static bool sss_chip_node_exist(struct sss_pci_adapter *adapter,
unsigned char bus_id)
{
struct sss_card_node *chip_node = NULL;
sss_chip_node_lock();
if (bus_id != 0) {
list_for_each_entry(chip_node, &g_chip_list, node) {
if (chip_node->bus_id == bus_id) {
adapter->chip_node = chip_node;
sss_chip_node_unlock();
return true;
}
}
} else if (SSS_IS_VF_DEV(adapter->pcidev) ||
SSS_IS_SPU_DEV(adapter->pcidev)) {
list_for_each_entry(chip_node, &g_chip_list, node) {
if (chip_node) {
adapter->chip_node = chip_node;
sss_chip_node_unlock();
return true;
}
}
}
sss_chip_node_unlock();
return false;
}
static unsigned char sss_get_pci_bus_id(struct sss_pci_adapter *adapter)
{
struct pci_dev *pf_pdev = NULL;
unsigned char bus_id = 0;
if (!pci_is_root_bus(adapter->pcidev->bus))
bus_id = adapter->pcidev->bus->number;
if (bus_id == 0)
return bus_id;
if (adapter->pcidev->is_virtfn) {
pf_pdev = adapter->pcidev->physfn;
bus_id = pf_pdev->bus->number;
}
return bus_id;
}
static bool sss_alloc_card_id(u8 *id)
{
unsigned char i;
sss_chip_node_lock();
for (i = 0; i < SSS_CARD_CNT_MAX; i++) {
if (test_and_set_bit(i, &g_index_bit_map) == 0) {
sss_chip_node_unlock();
*id = i;
return true;
}
}
sss_chip_node_unlock();
return false;
}
static void sss_free_card_id(u8 id)
{
clear_bit(id, &g_index_bit_map);
}
/**
 * sss_alloc_chip_node() - Bind the adapter to its card-level chip node.
 * @adapter: PCI adapter being probed.
 *
 * Reuses an existing node when another function on the same bus (or a
 * VF/SPU function) already registered one; otherwise allocates a fresh
 * node, assigns it a free card id and links it onto the global list.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL when no
 * card id is available or the chip name cannot be formatted.
 */
int sss_alloc_chip_node(struct sss_pci_adapter *adapter)
{
	struct sss_card_node *chip_node = NULL;
	unsigned char card_id;
	unsigned char bus_id;
	int name_len;

	bus_id = sss_get_pci_bus_id(adapter);

	if (sss_chip_node_exist(adapter, bus_id))
		return 0;

	chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL);
	if (!chip_node)
		return -ENOMEM;

	chip_node->bus_id = bus_id;

	if (!sss_alloc_card_id(&card_id)) {
		kfree(chip_node);
		sdk_err(&adapter->pcidev->dev, "chip node is exceed\n");
		return -EINVAL;
	}

	/* snprintf() reports truncation by returning the untruncated
	 * length (>= IFNAMSIZ); the original "< 0" test missed that case.
	 */
	name_len = snprintf(chip_node->chip_name, IFNAMSIZ, "%s%u",
			    SSS_CHIP_NAME, card_id);
	if (name_len < 0 || name_len >= IFNAMSIZ) {
		sss_free_card_id(card_id);
		kfree(chip_node);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&chip_node->func_list);
	sss_chip_node_lock();
	list_add_tail(&chip_node->node, &g_chip_list);
	sss_chip_node_unlock();

	adapter->chip_node = chip_node;
	sdk_info(&adapter->pcidev->dev,
		 "Success to add new chip %s to global list\n", chip_node->chip_name);

	return 0;
}
/**
 * sss_free_chip_node() - Drop the adapter's chip node if it is now unused.
 * @adapter: PCI adapter being removed.
 *
 * When the node's function list is empty, unlinks it from the global
 * list, releases its card id and frees it.
 */
void sss_free_chip_node(struct sss_pci_adapter *adapter)
{
	struct sss_card_node *chip_node = adapter->chip_node;
	int id;
	int ret;

	sss_chip_node_lock();
	if (list_empty(&chip_node->func_list)) {
		list_del(&chip_node->node);
		sdk_info(&adapter->pcidev->dev,
			 "Success to delete chip %s from global list\n",
			 chip_node->chip_name);

		/* sscanf() returns the number of converted fields: a match
		 * failure returns 0, which the original "< 0" test treated
		 * as success before releasing an uninitialized id.
		 */
		ret = sscanf(chip_node->chip_name, SSS_CHIP_NAME "%d", &id);
		if (ret == 1)
			sss_free_card_id(id);
		else
			sdk_err(&adapter->pcidev->dev, "Fail to get nic id\n");

		kfree(chip_node);
	}
	sss_chip_node_unlock();
}
void sss_add_func_list(struct sss_pci_adapter *adapter)
{
sss_chip_node_lock();
list_add_tail(&adapter->node, &adapter->chip_node->func_list);
sss_chip_node_unlock();
}
void sss_del_func_list(struct sss_pci_adapter *adapter)
{
sss_chip_node_lock();
list_del(&adapter->node);
sss_chip_node_unlock();
}
/**
 * sss_get_chip_node_by_hwdev() - Find the chip node owning a hwdev.
 * @hwdev: hardware device handle to search for, may be NULL.
 *
 * Walks every chip node's function list under a reader hold.
 *
 * Return: the owning chip node, or NULL if @hwdev is NULL or not found.
 */
static struct sss_card_node *sss_get_chip_node_by_hwdev(const void *hwdev)
{
	struct sss_card_node *chip_node = NULL;
	struct sss_card_node *node_tmp = NULL;
	struct sss_pci_adapter *dev = NULL;

	if (!hwdev)
		return NULL;

	sss_hold_chip_node();

	list_for_each_entry(node_tmp, &g_chip_list, node) {
		list_for_each_entry(dev, &node_tmp->func_list, node) {
			if (dev->hwdev == hwdev) {
				/* Stop as soon as the owner is found; the
				 * original kept iterating the remaining
				 * nodes behind a !chip_node guard.
				 */
				chip_node = node_tmp;
				goto out;
			}
		}
	}

out:
	sss_put_chip_node();

	return chip_node;
}
/* A function participates in card-level queries unless it is a VF. */
static bool sss_is_func_valid(struct sss_pci_adapter *dev)
{
	return sss_get_func_type(dev->hwdev) != SSS_FUNC_TYPE_VF;
}
static int sss_get_dynamic_uld_dev_name(struct sss_pci_adapter *dev, enum sss_service_type type,
char *ifname)
{
u32 out_size = IFNAMSIZ;
struct sss_uld_info *uld_info = sss_get_uld_info();
if (!uld_info[type].ioctl)
return -EFAULT;
return uld_info[type].ioctl(dev->uld_dev[type], SSS_TOOL_GET_ULD_DEV_NAME,
NULL, 0, ifname, &out_size);
}
/* Return true when the chip reports no supported service capability
 * (chip_svc_type == 0); callers use this to label a function "FOR_MGMT".
 * NOTE(review): despite the name, this returns true for *no* service
 * support — confirm against callers before relying on the name alone.
 */
static bool sss_support_service_type(void *hwdev)
{
	struct sss_hwdev *dev = hwdev;

	if (!hwdev)
		return false;

	return !dev->mgmt_info->svc_cap.chip_svc_type;
}
/**
 * sss_get_card_info() - Report per-PF service info for the card owning @hwdev.
 * @hwdev: any hwdev on the target card, used only to locate its chip node.
 * @bufin: caller buffer, interpreted as struct sss_tool_card_info.
 *
 * For every non-VF function on the card, records which services (NIC/PPA)
 * have an attached upper-layer device, the ULD interface name, and the
 * PCI bus string. Returns silently (pf_num == 0) if the card is unknown.
 */
void sss_get_card_info(const void *hwdev, void *bufin)
{
	struct sss_card_node *chip_node = NULL;
	struct sss_tool_card_info *info = (struct sss_tool_card_info *)bufin;
	struct sss_pci_adapter *dev = NULL;
	void *fun_hwdev = NULL;
	u32 i = 0;

	info->pf_num = 0;

	chip_node = sss_get_chip_node_by_hwdev(hwdev);
	if (!chip_node)
		return;

	sss_hold_chip_node();

	list_for_each_entry(dev, &chip_node->func_list, node) {
		/* VF functions are skipped; only PF/PPF are reported */
		if (!sss_is_func_valid(dev))
			continue;

		fun_hwdev = dev->hwdev;

		if (sss_support_nic(fun_hwdev)) {
			if (dev->uld_dev[SSS_SERVICE_TYPE_NIC]) {
				info->pf[i].pf_type |= (u32)BIT(SSS_SERVICE_TYPE_NIC);
				sss_get_dynamic_uld_dev_name(dev, SSS_SERVICE_TYPE_NIC,
							     info->pf[i].name);
			}
		}

		if (sss_support_ppa(fun_hwdev, NULL)) {
			if (dev->uld_dev[SSS_SERVICE_TYPE_PPA]) {
				info->pf[i].pf_type |= (u32)BIT(SSS_SERVICE_TYPE_PPA);
				sss_get_dynamic_uld_dev_name(dev, SSS_SERVICE_TYPE_PPA,
							     info->pf[i].name);
			}
		}

		/* no service capability at all: mark as management-only */
		if (sss_support_service_type(fun_hwdev))
			strlcpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ);

		strlcpy(info->pf[i].bus_info, pci_name(dev->pcidev),
			sizeof(info->pf[i].bus_info));
		info->pf_num++;
		/* advance write index to the next pf slot */
		i = info->pf_num;
	}

	sss_put_chip_node();
}
bool sss_is_in_host(void)
{
struct sss_card_node *node = NULL;
struct sss_pci_adapter *adapter = NULL;
sss_hold_chip_node();
list_for_each_entry(node, &g_chip_list, node) {
list_for_each_entry(adapter, &node->func_list, node) {
if (sss_get_func_type(adapter->hwdev) != SSS_FUNC_TYPE_VF) {
sss_put_chip_node();
return true;
}
}
}
sss_put_chip_node();
return false;
}
/**
 * sss_get_all_chip_id() - Collect the numeric ids of all known chips.
 * @id_info: caller buffer, interpreted as struct sss_card_id; on return
 * id[0..num-1] holds the parsed chip ids.
 */
void sss_get_all_chip_id(void *id_info)
{
	int i = 0;
	int id;
	int ret;
	struct sss_card_id *card_id = (struct sss_card_id *)id_info;
	struct sss_card_node *node = NULL;

	sss_hold_chip_node();
	list_for_each_entry(node, &g_chip_list, node) {
		/* defensive: never write past the caller-supplied array */
		if (i >= SSS_CARD_MAX_SIZE)
			break;

		/* sscanf() returns the number of converted fields: a match
		 * failure returns 0, which the original "< 0" test let fall
		 * through, publishing an uninitialized id.
		 */
		ret = sscanf(node->chip_name, SSS_CHIP_NAME "%d", &id);
		if (ret != 1) {
			pr_err("Fail to get chip id\n");
			continue;
		}
		card_id->id[i] = (u32)id;
		i++;
	}
	sss_put_chip_node();

	card_id->num = (u32)i;
}
/* Return the pci device handle stored in the hwdev, or NULL for a NULL hwdev. */
void *sss_get_pcidev_hdl(void *hwdev)
{
	struct sss_hwdev *dev = hwdev;

	return dev ? dev->pcidev_hdl : NULL;
}
struct sss_card_node *sss_get_card_node(struct sss_hal_dev *hal_dev)
{
struct sss_pci_adapter *adapter = pci_get_drvdata(hal_dev->pdev);
return adapter->chip_node;
}
void sss_get_card_func_info(const char *chip_name, struct sss_card_func_info *card_func)
{
struct sss_card_node *card_node = NULL;
struct sss_pci_adapter *adapter = NULL;
struct sss_func_pdev_info *info = NULL;
card_func->pf_num = 0;
sss_hold_chip_node();
list_for_each_entry(card_node, &g_chip_list, node) {
if (strncmp(card_node->chip_name, chip_name, IFNAMSIZ))
continue;
list_for_each_entry(adapter, &card_node->func_list, node) {
if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF)
continue;
info = &card_func->pdev_info[card_func->pf_num];
info->bar1_size =
pci_resource_len(adapter->pcidev, SSS_PF_PCI_CFG_REG_BAR);
info->bar1_pa =
pci_resource_start(adapter->pcidev, SSS_PF_PCI_CFG_REG_BAR);
info->bar3_size =
pci_resource_len(adapter->pcidev, SSS_PCI_MGMT_REG_BAR);
info->bar3_pa =
pci_resource_start(adapter->pcidev, SSS_PCI_MGMT_REG_BAR);
card_func->pf_num++;
if (card_func->pf_num >= SSS_PF_NUM_MAX) {
sss_put_chip_node();
return;
}
}
}
sss_put_chip_node();
}
int sss_get_pf_id(struct sss_card_node *card_node, u32 port_id, u32 *pf_id, u32 *valid)
{
struct sss_pci_adapter *adapter = NULL;
sss_hold_chip_node();
list_for_each_entry(adapter, &card_node->func_list, node) {
if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF)
continue;
if (SSS_TO_PHY_PORT_ID(adapter->hwdev) == port_id) {
*pf_id = sss_get_func_id(adapter->hwdev);
*valid = 1;
break;
}
}
sss_put_chip_node();
return 0;
}
/**
 * sss_get_uld_dev() - Take a reference on an attached upper-layer device.
 * @hal_dev: HAL device whose pdev carries the adapter, may be NULL.
 * @type: which service's ULD to fetch.
 *
 * Checks attachment state under uld_lock and bumps the per-service
 * reference count on success. Release with sss_uld_dev_put().
 *
 * Return: the ULD handle, or NULL when absent/detached/invalid input.
 */
void *sss_get_uld_dev(struct sss_hal_dev *hal_dev, enum sss_service_type type)
{
	struct sss_pci_adapter *dev = NULL;
	void *uld = NULL;

	if (!hal_dev)
		return NULL;

	dev = pci_get_drvdata(hal_dev->pdev);
	if (!dev)
		return NULL;

	spin_lock_bh(&dev->uld_lock);
	/* the ULD must exist and still be marked attached */
	if (!dev->uld_dev[type] || !test_bit(type, &dev->uld_attach_state)) {
		spin_unlock_bh(&dev->uld_lock);
		return NULL;
	}
	uld = dev->uld_dev[type];
	/* reference is taken while the lock still guarantees attachment */
	atomic_inc(&dev->uld_ref_cnt[type]);
	spin_unlock_bh(&dev->uld_lock);

	return uld;
}
void sss_uld_dev_put(struct sss_hal_dev *hal_dev, enum sss_service_type type)
{
struct sss_pci_adapter *pci_adapter = pci_get_drvdata(hal_dev->pdev);
atomic_dec(&pci_adapter->uld_ref_cnt[type]);
}
static bool sss_is_pcidev_match_dev_name(const char *dev_name, struct sss_pci_adapter *dev,
enum sss_service_type type)
{
enum sss_service_type i;
char nic_uld_name[IFNAMSIZ] = {0};
int err;
if (type > SSS_SERVICE_TYPE_MAX)
return false;
if (type == SSS_SERVICE_TYPE_MAX) {
for (i = SSS_SERVICE_TYPE_OVS; i < SSS_SERVICE_TYPE_MAX; i++) {
if (!strncmp(dev->uld_dev_name[i], dev_name, IFNAMSIZ))
return true;
}
} else {
if (!strncmp(dev->uld_dev_name[type], dev_name, IFNAMSIZ))
return true;
}
err = sss_get_dynamic_uld_dev_name(dev, SSS_SERVICE_TYPE_NIC, (char *)nic_uld_name);
if (err == 0) {
if (!strncmp(nic_uld_name, dev_name, IFNAMSIZ))
return true;
}
return false;
}
struct sss_hal_dev *sss_get_lld_dev_by_dev_name(const char *dev_name, enum sss_service_type type)
{
struct sss_card_node *chip_node = NULL;
struct sss_pci_adapter *dev = NULL;
sss_hold_chip_node();
list_for_each_entry(chip_node, &g_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (sss_is_pcidev_match_dev_name(dev_name, dev, type)) {
lld_dev_hold(&dev->hal_dev);
sss_put_chip_node();
return &dev->hal_dev;
}
}
}
sss_put_chip_node();
return NULL;
}
/* True when @dev sits on the chip named @ifname and has function type @type. */
static bool sss_is_pcidev_match_chip_name(const char *ifname, struct sss_pci_adapter *dev,
					  struct sss_card_node *chip_node, enum sss_func_type type)
{
	if (strncmp(chip_node->chip_name, ifname, IFNAMSIZ) != 0)
		return false;

	return sss_get_func_type(dev->hwdev) == type;
}
static struct sss_hal_dev *sss_get_dst_type_lld_dev_by_chip_name(const char *ifname,
enum sss_func_type type)
{
struct sss_card_node *chip_node = NULL;
struct sss_pci_adapter *dev = NULL;
list_for_each_entry(chip_node, &g_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (sss_is_pcidev_match_chip_name(ifname, dev, chip_node, type))
return &dev->hal_dev;
}
}
return NULL;
}
struct sss_hal_dev *sss_get_lld_dev_by_chip_name(const char *chip_name)
{
struct sss_hal_dev *dev = NULL;
sss_hold_chip_node();
dev = sss_get_dst_type_lld_dev_by_chip_name(chip_name, SSS_FUNC_TYPE_PPF);
if (dev)
goto out;
dev = sss_get_dst_type_lld_dev_by_chip_name(chip_name, SSS_FUNC_TYPE_PF);
if (dev)
goto out;
dev = sss_get_dst_type_lld_dev_by_chip_name(chip_name, SSS_FUNC_TYPE_VF);
out:
if (dev)
lld_dev_hold(dev);
sss_put_chip_node();
return dev;
}
struct sss_hal_dev *sss_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id)
{
struct sss_card_node *chip_node = NULL;
struct sss_pci_adapter *dev = NULL;
sss_hold_chip_node();
list_for_each_entry(chip_node, &g_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (sss_get_func_type(dev->hwdev) == SSS_FUNC_TYPE_VF)
continue;
if (SSS_TO_PHY_PORT_ID(dev->hwdev) == port_id &&
!strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) {
lld_dev_hold(&dev->hal_dev);
sss_put_chip_node();
return &dev->hal_dev;
}
}
}
sss_put_chip_node();
return NULL;
}

View File

@ -0,0 +1,100 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_ADAPTER_MGMT_H
#define SSS_ADAPTER_MGMT_H
#include <linux/types.h>
#include <linux/bitops.h>
#include "sss_version.h"
#include "sss_adapter.h"
#define SSS_DRV_VERSION SSS_VERSION_STR
#define SSS_DRV_NAME "sssdk"
#define SSS_CHIP_NAME "sssnic"
#define SSS_VF_PCI_CFG_REG_BAR 0
#define SSS_PF_PCI_CFG_REG_BAR 1
#define SSS_PCI_INTR_REG_BAR 2
#define SSS_PCI_MGMT_REG_BAR 3 /* Only PF have mgmt bar */
#define SSS_PCI_DB_BAR 4
#define SSS_IS_VF_DEV(pdev) ((pdev)->device == SSS_DEV_ID_VF)
#define SSS_CARD_MAX_SIZE (64)
struct sss_card_id {
u32 id[SSS_CARD_MAX_SIZE];
u32 num;
};
struct sss_func_pdev_info {
u64 bar0_pa;
u64 bar0_size;
u64 bar1_pa;
u64 bar1_size;
u64 bar3_pa;
u64 bar3_size;
u64 rsvd[4];
};
struct sss_card_func_info {
u32 pf_num;
u32 rsvd;
u64 usr_adm_pa;
struct sss_func_pdev_info pdev_info[SSS_CARD_MAX_SIZE];
};
enum {
SSS_NO_PROBE = 1,
SSS_PROBE_START = 2,
SSS_PROBE_OK = 3,
SSS_IN_REMOVE = 4,
};
struct list_head *sss_get_chip_list(void);
int sss_alloc_chip_node(struct sss_pci_adapter *adapter);
void sss_free_chip_node(struct sss_pci_adapter *adapter);
void sss_pre_init(void);
struct sss_pci_adapter *sss_get_adapter_by_pcidev(struct pci_dev *pdev);
void sss_add_func_list(struct sss_pci_adapter *adapter);
void sss_del_func_list(struct sss_pci_adapter *adapter);
void sss_hold_chip_node(void);
void sss_put_chip_node(void);
void sss_set_adapter_probe_state(struct sss_pci_adapter *adapter, int state);
void lld_dev_hold(struct sss_hal_dev *dev);
void lld_dev_put(struct sss_hal_dev *dev);
void sss_chip_node_lock(void);
void sss_chip_node_unlock(void);
void *sss_get_pcidev_hdl(void *hwdev);
void *sss_get_uld_dev(struct sss_hal_dev *hal_dev, enum sss_service_type type);
void sss_uld_dev_put(struct sss_hal_dev *hal_dev, enum sss_service_type type);
struct sss_hal_dev *sss_get_lld_dev_by_dev_name(const char *dev_name, enum sss_service_type type);
struct sss_hal_dev *sss_get_lld_dev_by_chip_name(const char *chip_name);
struct sss_hal_dev *sss_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id);
void sss_get_all_chip_id(void *id_info);
void sss_get_card_func_info
(const char *chip_name, struct sss_card_func_info *card_func);
void sss_get_card_info(const void *hwdev, void *bufin);
bool sss_is_in_host(void);
int sss_get_pf_id(struct sss_card_node *chip_node, u32 port_id, u32 *pf_id, u32 *valid);
struct sss_card_node *sss_get_card_node(struct sss_hal_dev *hal_dev);
#endif

View File

@ -0,0 +1,92 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#include <linux/kernel.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include "sss_kernel.h"
#include "sss_common.h"
#define SSS_MIN_SLEEP_TIME(us) ((us) - (us) / 10)
/* Sleep more than 20ms using msleep is accurate */
#define SSS_HANDLER_SLEEP(usleep_min, wait_once_us) \
do { \
if ((wait_once_us) >= 20 * USEC_PER_MSEC) \
msleep((wait_once_us) / USEC_PER_MSEC); \
else \
usleep_range((usleep_min), (wait_once_us)); \
} while (0)
/**
 * sss_dma_zalloc_coherent_align() - Zeroed coherent DMA buffer with an
 * aligned physical address.
 * @dev_hdl: device handle passed through to the DMA API.
 * @size: usable buffer size in bytes.
 * @align: required physical-address alignment.
 * @flag: GFP flags for the allocation.
 * @addr: out: original and aligned virtual/physical addresses plus the
 * size that was actually allocated.
 *
 * Tries a plain allocation first; if its physical address is already
 * aligned it is kept, otherwise it is released and a (size + align)
 * allocation is made so an aligned offset fits inside it.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int sss_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align,
				  unsigned int flag, struct sss_dma_addr_align *addr)
{
	dma_addr_t pa;
	dma_addr_t pa_align;
	void *va = NULL;
	void *va_align = NULL;
	u64 real_size = size;

	va = dma_zalloc_coherent(dev_hdl, real_size, &pa, flag);
	if (!va)
		return -ENOMEM;

	pa_align = ALIGN(pa, align);
	if (pa_align == pa) {
		va_align = va;
		goto same_addr_after_align;
	}

	/* misaligned: over-allocate by align and point into the padding */
	dma_free_coherent(dev_hdl, size, va, pa);
	real_size = size + align;
	va = dma_zalloc_coherent(dev_hdl, real_size, &pa, flag);
	if (!va)
		return -ENOMEM;

	pa_align = ALIGN(pa, align);
	va_align = (void *)((u64)va + (pa_align - pa));

same_addr_after_align:
	addr->origin_paddr = pa;
	addr->align_paddr = pa_align;
	addr->origin_vaddr = va;
	addr->align_vaddr = va_align;
	/* Record the length actually allocated: sss_dma_free_coherent_align()
	 * frees with real_size, so on the over-allocated path the original
	 * code (which stored the unpadded size) released a mismatched length.
	 */
	addr->real_size = (u32)real_size;

	return 0;
}
void sss_dma_free_coherent_align(void *dev_hdl, struct sss_dma_addr_align *addr)
{
dma_free_coherent(dev_hdl, addr->real_size, addr->origin_vaddr, addr->origin_paddr);
}
/**
 * sss_check_handler_timeout() - Poll a condition callback until it settles.
 * @priv_data: opaque pointer handed to @handler on each call.
 * @handler: callback returning SSS_PROCESS_OK / SSS_PROCESS_ERR / retry.
 * @wait_total_ms: overall deadline in milliseconds.
 * @wait_once_us: per-iteration sleep in microseconds (>= 20ms sleeps use
 * msleep via SSS_HANDLER_SLEEP, shorter ones usleep_range).
 *
 * Return: 0 when @handler reports OK, -EIO on SSS_PROCESS_ERR,
 * -EINVAL for a NULL handler, -ETIMEDOUT when the deadline passes.
 */
int sss_check_handler_timeout(void *priv_data, sss_wait_handler_t handler,
			      u32 wait_total_ms, u32 wait_once_us)
{
	enum sss_process_ret ret;
	unsigned long end;
	u32 usleep_min = SSS_MIN_SLEEP_TIME(wait_once_us);

	if (!handler)
		return -EINVAL;

	end = jiffies + msecs_to_jiffies(wait_total_ms);
	do {
		ret = handler(priv_data);
		if (ret == SSS_PROCESS_OK)
			return 0;
		else if (ret == SSS_PROCESS_ERR)
			return -EIO;

		SSS_HANDLER_SLEEP(usleep_min, wait_once_us);
	} while (time_before(jiffies, end));

	/* one last attempt after the deadline, in case we slept past it */
	ret = handler(priv_data);
	if (ret == SSS_PROCESS_OK)
		return 0;
	else if (ret == SSS_PROCESS_ERR)
		return -EIO;

	return -ETIMEDOUT;
}

View File

@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_COMMON_H
#define SSS_COMMON_H
#include <linux/types.h>
#include "sss_hw_common.h"
int sss_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align,
unsigned int flag, struct sss_dma_addr_align *mem_align);
void sss_dma_free_coherent_align(void *dev_hdl, struct sss_dma_addr_align *mem_align);
int sss_check_handler_timeout(void *priv_data, sss_wait_handler_t handler,
u32 wait_total_ms, u32 wait_once_us);
#endif

View File

@ -0,0 +1,90 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <net/addrconf.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/aer.h>
#include <linux/debugfs.h>
#include "sss_kernel.h"
#include "sss_version.h"
#include "sss_adapter_mgmt.h"
#include "sss_pci_id_tbl.h"
#include "sss_pci_sriov.h"
#include "sss_pci_probe.h"
#include "sss_pci_remove.h"
#include "sss_pci_shutdown.h"
#include "sss_pci_error.h"
/* Banner text printed once at module load (see sss_init_pci()). */
#define SSS_DRV_DESC "Intelligent Network Interface Card Driver"
MODULE_AUTHOR("steven.song@3snic.com");
MODULE_DESCRIPTION("3SNIC Network Interface Card Driver");
MODULE_VERSION(SSS_DRV_VERSION);
MODULE_LICENSE("GPL");
/* PCI IDs this driver binds to: standard device, SPN120 variant and the VF. */
static const struct pci_device_id g_pci_table[] = {
	{PCI_VDEVICE(SSSNIC, SSS_DEV_ID_STANDARD), 0},
	{PCI_VDEVICE(SSSNIC, SSS_DEV_ID_SPN120), 0},
	{PCI_VDEVICE(SSSNIC, SSS_DEV_ID_VF), 0},
	{0, 0}
};
MODULE_DEVICE_TABLE(pci, g_pci_table);
/* RHEL6 kernels expose sriov_configure through a vendor extension struct. */
#ifdef HAVE_RHEL6_SRIOV_CONFIGURE
static struct pci_driver_rh g_pci_driver_rh = {
	.sriov_configure = sss_pci_configure_sriov,
};
#endif
static struct pci_error_handlers g_pci_err_handler = {
	.error_detected = sss_detect_pci_error,
};
static struct pci_driver g_pci_driver = {
	.name = SSS_DRV_NAME,
	.id_table = g_pci_table,
	.probe = sss_pci_probe,
	.remove = sss_pci_remove,
	.shutdown = sss_pci_shutdown,
/* Compatibility #ifdefs below are intentional — do not remove. */
#if defined(HAVE_SRIOV_CONFIGURE)
	.sriov_configure = sss_pci_configure_sriov,
#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE)
	.rh_reserved = &g_pci_driver_rh,
#endif
	.err_handler = &g_pci_err_handler
};
/* Module entry point: announce the driver, run one-time pre-init and
 * register with the PCI core; propagates the registration result.
 */
static __init int sss_init_pci(void)
{
	pr_info("%s - version %s\n", SSS_DRV_DESC, SSS_DRV_VERSION);
	sss_pre_init();

	return pci_register_driver(&g_pci_driver);
}
/* Module exit point: unbind from all devices and unregister the driver. */
static __exit void sss_exit_pci(void)
{
	pci_unregister_driver(&g_pci_driver);
}
module_init(sss_init_pci);
module_exit(sss_exit_pci);

View File

@ -0,0 +1,136 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/interrupt.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_csr.h"
#include "sss_hwdev.h"
#include "sss_hwdev_api.h"
#include "sss_hwif_api.h"
/*
 * sss_chip_sync_time() - push the host wall-clock time @mstime (ms)
 * to the management CPU over the sync message channel.
 * Return: 0 on success, -EIO when the channel reports failure.
 */
int sss_chip_sync_time(void *hwdev, u64 mstime)
{
	struct sss_cmd_sync_time sync_cmd = {0};
	u16 reply_len = sizeof(sync_cmd);
	int err;

	sync_cmd.mstime = mstime;
	err = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SYNC_TIME, &sync_cmd,
				sizeof(sync_cmd), &sync_cmd, &reply_len);
	if (SSS_ASSERT_SEND_MSG_RETURN(err, reply_len, &sync_cmd)) {
		sdk_err(SSS_TO_DEV(hwdev),
			"Fail to sync time, ret: %d, status: 0x%x, out_len: 0x%x\n",
			err, sync_cmd.head.state, reply_len);
		return -EIO;
	}

	return 0;
}
/* Disable the management channel by setting the PF status back to
 * SSS_PF_STATUS_INIT.
 */
void sss_chip_disable_mgmt_channel(void *hwdev)
{
	sss_chip_set_pf_status(SSS_TO_HWIF(hwdev), SSS_PF_STATUS_INIT);
}
int sss_chip_get_board_info(void *hwdev, struct sss_board_info *board_info)
{
int ret;
struct sss_cmd_board_info cmd_info = {0};
u16 out_len = sizeof(cmd_info);
ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_GET_BOARD_INFO,
&cmd_info, sizeof(cmd_info), &cmd_info, &out_len);
if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_info)) {
sdk_err(SSS_TO_DEV(hwdev),
"Fail to get board info, ret: %d, status: 0x%x, out_len: 0x%x\n",
ret, cmd_info.head.state, out_len);
return -EIO;
}
memcpy(board_info, &cmd_info.info, sizeof(*board_info));
return 0;
}
/*
 * sss_chip_do_nego_feature() - negotiate the feature bitmap with firmware.
 * A SET opcode pushes @feature (first @feature_num u64 words) to the chip,
 * a GET opcode reads the negotiated words back into @feature.
 * Return: 0 on success, -EINVAL on channel failure.
 */
int sss_chip_do_nego_feature(void *hwdev, u8 opcode, u64 *feature, u16 feature_num)
{
	struct sss_cmd_feature_nego nego_cmd = {0};
	u16 reply_len = sizeof(nego_cmd);
	int err;

	nego_cmd.func_id = sss_get_global_func_id(hwdev);
	nego_cmd.opcode = opcode;
	if (opcode == SSS_MGMT_MSG_SET_CMD)
		memcpy(nego_cmd.feature, feature, (feature_num * sizeof(u64)));

	err = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_FEATURE_NEGO,
				&nego_cmd, sizeof(nego_cmd), &nego_cmd, &reply_len);
	if (SSS_ASSERT_SEND_MSG_RETURN(err, reply_len, &nego_cmd)) {
		sdk_err(SSS_TO_DEV(hwdev),
			"Fail to nego feature, opcode: %d, ret: %d, status: 0x%x, out_len: 0x%x\n",
			opcode, err, nego_cmd.head.state, reply_len);
		return -EINVAL;
	}

	if (opcode == SSS_MGMT_MSG_GET_CMD)
		memcpy(feature, nego_cmd.feature, (feature_num * sizeof(u64)));

	return 0;
}
/*
 * Report this function's PCI bus/device/function location to the
 * management CPU so it can be correlated with the global function id.
 * Return: 0 on success, -EIO on management-channel failure.
 */
int sss_chip_set_pci_bdf_num(void *hwdev, u8 bus_id, u8 device_id, u8 func_id)
{
	int ret;
	struct sss_cmd_bdf_info cmd_bdf = {0};
	u16 out_len = sizeof(cmd_bdf);

	cmd_bdf.bus = bus_id;
	cmd_bdf.device = device_id;
	cmd_bdf.function = func_id;
	cmd_bdf.function_id = sss_get_global_func_id(hwdev);
	ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SEND_BDF_INFO,
				&cmd_bdf, sizeof(cmd_bdf), &cmd_bdf, &out_len);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_bdf)) {
		sdk_err(SSS_TO_DEV(hwdev),
			"Fail to set bdf info, ret: %d, status: 0x%x, out_len: 0x%x\n",
			ret, cmd_bdf.head.state, out_len);
		return -EIO;
	}
	return 0;
}
int sss_chip_comm_channel_detect(struct sss_hwdev *hwdev)
{
int ret;
struct sss_cmd_channel_detect cmd_detect = {0};
u16 out_len = sizeof(cmd_detect);
if (!hwdev)
return -EINVAL;
cmd_detect.func_id = sss_get_global_func_id(hwdev);
ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_CHANNEL_DETECT,
&cmd_detect, sizeof(cmd_detect), &cmd_detect, &out_len);
if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_detect)) {
sdk_err(hwdev->dev_hdl,
"Fail to send channel detect, ret: %d, status: 0x%x, out_size: 0x%x\n",
ret, cmd_detect.head.state, out_len);
return -EINVAL;
}
return 0;
}

View File

@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWDEV_API_H
#define SSS_HWDEV_API_H
#include <linux/types.h>
#include "sss_hw_mbx_msg.h"
#include "sss_hwdev.h"
/* Push the host time in milliseconds to the management CPU. */
int sss_chip_sync_time(void *hwdev, u64 mstime);
/* Query the board description from firmware into @board_info. */
int sss_chip_get_board_info(void *hwdev, struct sss_board_info *board_info);
/* Set the PF status back to INIT, disabling the management channel. */
void sss_chip_disable_mgmt_channel(void *hwdev);
/* Feature bitmap negotiation: SET pushes @feature, GET reads it back. */
int sss_chip_do_nego_feature(void *hwdev, u8 opcode, u64 *feature, u16 feature_num);
/* Report the PCI bus/device/function location to firmware. */
int sss_chip_set_pci_bdf_num(void *hwdev, u8 bus_id, u8 device_id, u8 func_id);
/* Probe the management channel for liveness. */
int sss_chip_comm_channel_detect(struct sss_hwdev *hwdev);
#endif

View File

@ -0,0 +1,748 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_hwif_api.h"
#include "sss_hwdev_cap.h"
/* RDMA resource */
#define K_UNIT BIT(10)
#define M_UNIT BIT(20)
#define G_UNIT BIT(30)
/* L2NIC */
#define SSS_CFG_MAX_QP 256
/* RDMA */
#define SSS_RDMA_RSVD_QP 2
#define SSS_ROCE_MAX_WQE (8 * K_UNIT - 1)
#define SSS_RDMA_MAX_SQ_SGE 16
#define SSS_ROCE_MAX_RQ_SGE 16
#define SSS_RDMA_MAX_SQ_DESC_SIZE 256
/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 48B(max_task_seg_len)) */
#define SSS_ROCE_MAX_SQ_INLINE_DATA_SIZE 192
#define SSS_ROCE_MAX_RQ_DESC_SIZE 256
#define SSS_ROCE_QPC_ENTRY_SIZE 512
#define SSS_WQEBB_SIZE 64
#define SSS_ROCE_RDMARC_ENTRY_SIZE 32
#define SSS_ROCE_MAX_QP_INIT_RDMA 128
#define SSS_ROCE_MAX_QP_DEST_RDMA 128
#define SSS_ROCE_MAX_SRQ_WQE (16 * K_UNIT - 1)
#define SSS_ROCE_RSVD_SRQ 0
#define SSS_ROCE_MAX_SRQ_SGE 15
#define ROCE_SRQC_ENTERY_SIZE 64
#define SSS_ROCE_MAX_SRQ 0x400
#define SSS_ROCE_MAX_CQ 0x800
#define SSS_ROCE_MAX_QP 0x400
#define SSS_ROCE_MAX_MPT 0x400
#define SSS_ROCE_MAX_DRC_QP 0x40
#define SSS_RDMA_MAX_CQE (8 * M_UNIT - 1)
#define SSS_RDMA_RSVD_CQ 0
#define SSS_RDMA_CQC_ENTRY_SIZE 128
#define SSS_RDMA_CQE_SIZE 64
#define SSS_RDMA_RSVD_MRW 128
#define SSS_RDMA_MPT_ENTRY_SIZE 64
#define SSS_RDMA_MTT_NUM (1 * G_UNIT)
#define SSS_LOG_MTT_SEG 5
#define SSS_MTT_ENTRY_SIZE 8
#define SSS_LOG_RDMARC_SEG 3
#define SSS_LOCAL_ACK_DELAY 15
#define SSS_RDMA_PORT_NUM 1
#define SSS_ROCE_MAX_MSG_SIZE (2 * G_UNIT)
#define SSS_DB_PAGE_SIZE_K (4 * K_UNIT)
#define SSS_DWQE_SIZE 256
#define SSS_PD_NUM (128 * K_UNIT)
#define SSS_RSVD_PD 0
#define SSS_MAX_XRCD (64 * K_UNIT)
#define SSS_RSVD_XRCD 0
#define SSS_MAX_GID_PER_PORT 128
#define SSS_GID_ENTRY_SIZE 32
#define SSS_RSVD_LKEY ((SSS_RDMA_RSVD_MRW - 1) << 8)
#define SSS_PAGE_SIZE_CAP ((1UL << 12) | (1UL << 16) | (1UL << 21))
#define SSS_ROCE_MODE 1
#define SSS_MAX_FRPL_LEN 511
#define SSS_MAX_PKEY 1
/* ToE */
#define SSS_TOE_PCTX_SIZE 1024
#define SSS_TOE_SCQC_SIZE 64
/* FC */
#define SSS_FC_PQPC_SIZE 256
#define SSS_FC_CQPC_SIZE 256
#define SSS_FC_SQE_SIZE 128
#define SSS_FC_SCQC_SIZE 64
#define SSS_FC_SCQE_SIZE 64
#define SSS_FC_SRQC_SIZE 64
#define SSS_FC_SRQE_SIZE 32
/* OVS */
#define SSS_OVS_PCTX_SIZE 512
/* PPA */
#define SSS_PPA_PCTX_SIZE 512
/* IPsec */
#define SSS_IPSEC_SACTX_SIZE 512
/* VirtIO */
#define SSS_VIRTIO_BASE_VQ_SIZE 2048U
#define SSS_VIRTIO_DEFAULT_VQ_SIZE 8192U
/*
 * Device capability reply returned by the MPU for SSS_CFG_CMD_GET_CAP_CFG.
 * The layout is a firmware ABI: do not reorder, resize or remove fields
 * (including the rsvd* padding members).
 */
struct sss_cmd_dev_cap_cfg {
	struct sss_mgmt_msg_head head;
	u16 func_id;
	u16 rsvd;
	/* Public (service-independent) capabilities */
	u8 host_id;
	u8 ep_id;
	u8 er_id;
	u8 port_id;
	u16 host_total_function;
	u8 pf_num;
	u8 pf_id_start;
	u16 vf_num;
	u16 vf_id_start;
	u8 host_oq_id_mask_val;
	u8 timer_en;
	u8 host_valid_bitmap;
	u8 rsvd_host;
	u16 svc_type;
	u16 max_vf;
	u8 flexq_en;
	u8 cos_valid_bitmap;
	u8 port_cos_valid_bitmap;
	u8 rsvd_func1;
	u32 rsvd_func2;
	u8 sf_svc_attr;	/* bitmask, see SSS_SF_SVC_*_BIT */
	u8 func_sf_en;
	u8 lb_mode;
	u8 smf_pg;
	u32 max_connect_num;
	u16 max_stick2cache_num;
	u16 bfilter_start_addr;
	u16 bfilter_len;
	u16 hash_bucket_num;
	u8 host_sf_en;
	u8 master_host_id;
	u8 srv_multi_host_mode;
	u8 rsvd2_sr;
	u32 rsvd_func3[5];
	/* l2nic */
	u16 nic_max_sq_id;
	u16 nic_max_rq_id;
	u16 nic_def_queue_num;
	u16 rsvd_nic1;
	u32 rsvd_nic2[2];
	/* RoCE */
	u32 roce_max_qp;
	u32 roce_max_cq;
	u32 roce_max_srq;
	u32 roce_max_mpt;
	u32 roce_max_drc_qp;
	u32 roce_cmtt_cl_start;
	u32 roce_cmtt_cl_end;
	u32 roce_cmtt_cl_size;
	u32 roce_dmtt_cl_start;
	u32 roce_dmtt_cl_end;
	u32 roce_dmtt_cl_size;
	u32 roce_wqe_cl_start;
	u32 roce_wqe_cl_end;
	u32 roce_wqe_cl_size;
	u8 roce_srq_container_mode;
	u8 rsvd_roce1[3];
	u32 rsvd_roce2[5];
	/* IPsec */
	u32 ipsec_max_sactx;
	u16 ipsec_max_cq;
	u16 rsvd_ipsec1;
	u32 rsvd_ipsec2[2];
	/* OVS */
	u32 ovs_max_qpc;
	u32 rsvd_ovs[3];
	/* ToE */
	u32 toe_max_pctx;
	u32 toe_max_cq;
	u16 toe_max_srq;
	u16 toe_srq_id_start;
	u16 toe_max_mpt;
	u16 toe_max_cctxt;
	u32 rsvd_toe[2];
	/* FC */
	u32 fc_max_pctx;
	u32 fc_max_scq;
	u32 fc_max_srq;
	u32 fc_max_cctx;
	u32 fc_cctx_id_start;
	u8 fc_vp_id_start;
	u8 fc_vp_id_end;
	u8 rsvd_fc1[2];
	u32 rsvd_fc2[5];
	/* VBS */
	u16 vbs_max_volq;
	u16 rsvd_vbs1;
	u32 rsvd_vbs2[3];
	u16 pseudo_vf_start_id;
	u16 pseudo_vf_num;
	u32 pseudo_vf_max_pctx;
	u16 pseudo_vf_bfilter_start_addr;
	u16 pseudo_vf_bfilter_len;
	u32 rsvd_glb[8];
};
/* Bit flags carried in sss_cmd_dev_cap_cfg.sf_svc_attr. */
enum {
	SSS_SF_SVC_FT_BIT = (1 << 0),
	SSS_SF_SVC_RDMA_BIT = (1 << 1),
};
/* Sub-commands of the CFGM mailbox module (SSS_MOD_TYPE_CFGM). */
enum sss_cfg_cmd {
	SSS_CFG_CMD_GET_CAP_CFG = 0,
	SSS_CFG_CMD_GET_HOST_TIMER = 1,
};
/*
 * Log the parsed public (service-independent) capability fields.
 * NOTE(review): "pubic" is a typo of "public"; spelling kept because
 * in-file callers use this name.
 */
static void sss_print_pubic_cap(void *dev_hdl, const struct sss_service_cap *svc_cap)
{
	sdk_info(dev_hdl,
		 "Get public capbility: svc_type: 0x%x, chip_svc_type: 0x%x\n",
		 svc_cap->svc_type, svc_cap->chip_svc_type);
	sdk_info(dev_hdl,
		 "host_id: 0x%x, ep_id: 0x%x, er_id: 0x%x, port_id: 0x%x\n",
		 svc_cap->host_id, svc_cap->ep_id, svc_cap->er_id, svc_cap->port_id);
	sdk_info(dev_hdl,
		 "host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x\n",
		 svc_cap->host_total_function, svc_cap->host_oq_id_mask_val, svc_cap->max_vf);
	sdk_info(dev_hdl,
		 "pf_num: 0x%x, pf_id_start: 0x%x, vf_num: 0x%x, vf_id_start: 0x%x\n",
		 svc_cap->pf_num, svc_cap->pf_id_start, svc_cap->vf_num, svc_cap->vf_id_start);
	sdk_info(dev_hdl,
		 "host_valid_bitmap: 0x%x, master_host_id: 0x%x, srv_multi_host_mode: 0x%x\n",
		 svc_cap->host_valid_bitmap, svc_cap->master_host_id, svc_cap->srv_multi_host_mode);
	sdk_info(dev_hdl,
		 "cos_valid_bitmap: 0x%x, port_cos_valid_bitmap: 0x%x, flexq_en: 0x%x, virtio_vq_size: 0x%x\n",
		 svc_cap->cos_valid_bitmap, svc_cap->port_cos_valid_bitmap, svc_cap->flexq_en,
		 svc_cap->virtio_vq_size);
	sdk_info(dev_hdl,
		 "pseudo_vf_start_id: 0x%x, pseudo_vf_num: 0x%x, pseudo_vf_max_pctx: 0x%x\n",
		 svc_cap->pseudo_vf_start_id, svc_cap->pseudo_vf_num, svc_cap->pseudo_vf_max_pctx);
	sdk_info(dev_hdl,
		 "pseudo_vf_bfilter_start_addr: 0x%x, pseudo_vf_bfilter_len: 0x%x\n",
		 svc_cap->pseudo_vf_bfilter_start_addr, svc_cap->pseudo_vf_bfilter_len);
}
/*
 * Copy queue/memory-management related capabilities (pseudo-VF, bloom
 * filter, virtio queue size, SF attributes) from the firmware reply
 * into the driver capability cache.
 */
static void sss_parse_qmm_cap(struct sss_hwdev *hwdev,
			      struct sss_service_cap *svc_cap, struct sss_cmd_dev_cap_cfg *cmd_cap)
{
	struct sss_dev_sf_svc_attr *sf_svc_attr = &svc_cap->sf_svc_attr;

	svc_cap->pseudo_vf_num = cmd_cap->pseudo_vf_num;
	svc_cap->pseudo_vf_cfg_num = cmd_cap->pseudo_vf_num;
	svc_cap->pseudo_vf_start_id = cmd_cap->pseudo_vf_start_id;
	svc_cap->pseudo_vf_max_pctx = cmd_cap->pseudo_vf_max_pctx;
	svc_cap->pseudo_vf_bfilter_start_addr = cmd_cap->pseudo_vf_bfilter_start_addr;
	svc_cap->pseudo_vf_bfilter_len = cmd_cap->pseudo_vf_bfilter_len;
	/*
	 * NOTE(review): the shift amount is svc_cap->virtio_vq_size's value
	 * prior to this call (presumably a level code set elsewhere), not a
	 * field of cmd_cap — confirm against firmware documentation.
	 */
	if (SSS_SUPPORT_VIRTIO_VQ_SIZE(hwdev))
		svc_cap->virtio_vq_size = (u16)(SSS_VIRTIO_BASE_VQ_SIZE << svc_cap->virtio_vq_size);
	else
		svc_cap->virtio_vq_size = SSS_VIRTIO_DEFAULT_VQ_SIZE;
	sf_svc_attr->rdma_en = !!(cmd_cap->sf_svc_attr & SSS_SF_SVC_RDMA_BIT);
	svc_cap->smf_pg = cmd_cap->smf_pg;
	svc_cap->lb_mode = cmd_cap->lb_mode;
	svc_cap->timer_en = cmd_cap->timer_en;
	svc_cap->bfilter_start_addr = cmd_cap->bfilter_start_addr;
	svc_cap->bfilter_len = cmd_cap->bfilter_len;
	svc_cap->host_oq_id_mask_val = cmd_cap->host_oq_id_mask_val;
	svc_cap->hash_bucket_num = cmd_cap->hash_bucket_num;
	svc_cap->max_stick2cache_num = cmd_cap->max_stick2cache_num;
	svc_cap->max_connect_num = cmd_cap->max_connect_num;
}
/*
 * Parse the public (service-independent) part of the capability reply.
 * PF/PPF functions take the PF/VF topology fields from firmware;
 * VFs have max_vf forced to 0.
 */
static void sss_parse_pubic_cap(struct sss_hwdev *hwdev,
				struct sss_service_cap *svc_cap,
				struct sss_cmd_dev_cap_cfg *cmd_cap,
				enum sss_func_type type)
{
	svc_cap->svc_type = cmd_cap->svc_type;
	svc_cap->chip_svc_type = cmd_cap->svc_type;
	svc_cap->ep_id = cmd_cap->ep_id;
	svc_cap->er_id = cmd_cap->er_id;
	svc_cap->host_id = cmd_cap->host_id;
	svc_cap->port_id = cmd_cap->port_id;
	svc_cap->host_total_function = cmd_cap->host_total_function;
	svc_cap->host_valid_bitmap = cmd_cap->host_valid_bitmap;
	svc_cap->master_host_id = cmd_cap->master_host_id;
	svc_cap->srv_multi_host_mode = cmd_cap->srv_multi_host_mode;
	svc_cap->flexq_en = cmd_cap->flexq_en;
	svc_cap->cos_valid_bitmap = cmd_cap->cos_valid_bitmap;
	svc_cap->port_cos_valid_bitmap = cmd_cap->port_cos_valid_bitmap;
	if (type != SSS_FUNC_TYPE_VF) {
		svc_cap->pf_num = cmd_cap->pf_num;
		svc_cap->pf_id_start = cmd_cap->pf_id_start;
		svc_cap->vf_num = cmd_cap->vf_num;
		svc_cap->vf_id_start = cmd_cap->vf_id_start;
		svc_cap->max_vf = cmd_cap->max_vf;
	} else {
		svc_cap->max_vf = 0;
	}
	/* PPF takes the host-level SF enable, others the per-function one. */
	svc_cap->sf_en = (type == SSS_FUNC_TYPE_PPF) ?
			 (!!cmd_cap->host_sf_en) : (!!cmd_cap->func_sf_en);
	sss_parse_qmm_cap(hwdev, svc_cap, cmd_cap);
	sss_print_pubic_cap(hwdev->dev_hdl, svc_cap);
}
/*
 * Parse the L2 NIC queue capabilities and clamp firmware-provided
 * queue counts to the driver limit SSS_CFG_MAX_QP.
 */
static void sss_parse_l2nic_cap(struct sss_hwdev *hwdev,
				struct sss_service_cap *svc_cap,
				struct sss_cmd_dev_cap_cfg *cmd_cap,
				enum sss_func_type type)
{
	struct sss_nic_service_cap *nic_cap = &svc_cap->nic_cap;

	if (!SSS_IS_NIC_TYPE(hwdev))
		return;

	nic_cap->max_sq = cmd_cap->nic_max_sq_id + 1;
	nic_cap->max_rq = cmd_cap->nic_max_rq_id + 1;
	nic_cap->def_queue_num = cmd_cap->nic_def_queue_num;

	sdk_info(hwdev->dev_hdl,
		 "Get Nic capbility, max_sq: 0x%x, max_rq: 0x%x, def_queue_num: 0x%x\n",
		 nic_cap->max_sq, nic_cap->max_rq, nic_cap->def_queue_num);

	/* Sanitize firmware-reported values against the driver limit. */
	if (nic_cap->max_sq > SSS_CFG_MAX_QP || nic_cap->max_rq > SSS_CFG_MAX_QP) {
		sdk_info(hwdev->dev_hdl, "Exceed limit[1-%d]:sq: %u, rq: %u\n",
			 SSS_CFG_MAX_QP, nic_cap->max_sq, nic_cap->max_rq);
		nic_cap->max_rq = SSS_CFG_MAX_QP;
		nic_cap->max_sq = SSS_CFG_MAX_QP;
	}
}
/*
 * Parse the Fibre Channel capability section. Only PF/PPF functions
 * carry FC resources (FC is not virtualized).
 */
static void sss_parse_fc_cap(struct sss_hwdev *hwdev,
			     struct sss_service_cap *svc_cap,
			     struct sss_cmd_dev_cap_cfg *cmd_cap,
			     enum sss_func_type type)
{
	struct sss_fc_service_cap *fc_svc_cap = &svc_cap->fc_cap;
	struct sss_dev_fc_svc_cap *dev_fc_cap = &fc_svc_cap->dev_fc_cap;

	if (!SSS_IS_FC_TYPE(hwdev))
		return;

	/* FC without virtulization */
	if (type != SSS_FUNC_TYPE_PF && type != SSS_FUNC_TYPE_PPF)
		return;

	dev_fc_cap->srq_num = cmd_cap->fc_max_srq;
	dev_fc_cap->scq_num = cmd_cap->fc_max_scq;
	dev_fc_cap->max_parent_qpc_num = cmd_cap->fc_max_pctx;
	dev_fc_cap->max_child_qpc_num = cmd_cap->fc_max_cctx;
	dev_fc_cap->child_qpc_id_start = cmd_cap->fc_cctx_id_start;
	dev_fc_cap->vp_id_start = cmd_cap->fc_vp_id_start;
	dev_fc_cap->vp_id_end = cmd_cap->fc_vp_id_end;
	/* Fixed context/entry sizes defined by the driver. */
	fc_svc_cap->parent_qpc_size = SSS_FC_PQPC_SIZE;
	fc_svc_cap->child_qpc_size = SSS_FC_CQPC_SIZE;
	fc_svc_cap->sqe_size = SSS_FC_SQE_SIZE;
	fc_svc_cap->scqc_size = SSS_FC_SCQC_SIZE;
	fc_svc_cap->scqe_size = SSS_FC_SCQE_SIZE;
	fc_svc_cap->srqc_size = SSS_FC_SRQC_SIZE;
	fc_svc_cap->srqe_size = SSS_FC_SRQE_SIZE;
	sdk_info(hwdev->dev_hdl, "Get FC capbility, type: 0x%x\n", type);
	sdk_info(hwdev->dev_hdl,
		 "max_parent_qpc_num: 0x%x, max_child_qpc_num: 0x%x, scq_num: 0x%x, srq_num: 0x%x\n",
		 dev_fc_cap->max_parent_qpc_num, dev_fc_cap->max_child_qpc_num,
		 dev_fc_cap->scq_num, dev_fc_cap->srq_num);
	sdk_info(hwdev->dev_hdl, "child_qpc_id_start: 0x%x, vp_id_start: 0x%x, vp_id_end: 0x%x\n",
		 dev_fc_cap->child_qpc_id_start, dev_fc_cap->vp_id_start, dev_fc_cap->vp_id_end);
}
/*
 * Fill the RDMA capability structure with the driver's fixed defaults
 * (entry sizes, limits and feature flags defined by the SSS_* macros
 * above). Only comp_vector_num comes from runtime state (CEQ count).
 */
static void sss_init_rdma_cap_param(struct sss_hwdev *hwdev)
{
	struct sss_rdma_service_cap *rdma_svc_cap = &hwdev->mgmt_info->svc_cap.rdma_cap;
	struct sss_dev_roce_svc_own_cap *roce_own_cap =
		&rdma_svc_cap->dev_rdma_cap.roce_own_cap;

	rdma_svc_cap->log_mtt = SSS_LOG_MTT_SEG;
	rdma_svc_cap->log_rdmarc = SSS_LOG_RDMARC_SEG;
	rdma_svc_cap->reserved_qp = SSS_RDMA_RSVD_QP;
	rdma_svc_cap->max_sq_sg = SSS_RDMA_MAX_SQ_SGE;
	/* RoCE */
	roce_own_cap->qpc_entry_size = SSS_ROCE_QPC_ENTRY_SIZE;
	roce_own_cap->max_wqe = SSS_ROCE_MAX_WQE;
	roce_own_cap->max_rq_sg = SSS_ROCE_MAX_RQ_SGE;
	roce_own_cap->max_sq_inline_data_size = SSS_ROCE_MAX_SQ_INLINE_DATA_SIZE;
	roce_own_cap->max_rq_desc_size = SSS_ROCE_MAX_RQ_DESC_SIZE;
	roce_own_cap->rdmarc_entry_size = SSS_ROCE_RDMARC_ENTRY_SIZE;
	roce_own_cap->max_qp_init_rdma = SSS_ROCE_MAX_QP_INIT_RDMA;
	roce_own_cap->max_qp_dest_rdma = SSS_ROCE_MAX_QP_DEST_RDMA;
	roce_own_cap->max_srq_wqe = SSS_ROCE_MAX_SRQ_WQE;
	roce_own_cap->reserved_srq = SSS_ROCE_RSVD_SRQ;
	roce_own_cap->max_srq_sge = SSS_ROCE_MAX_SRQ_SGE;
	roce_own_cap->srqc_entry_size = ROCE_SRQC_ENTERY_SIZE;
	roce_own_cap->max_msg_size = SSS_ROCE_MAX_MSG_SIZE;
	rdma_svc_cap->max_sq_desc_size = SSS_RDMA_MAX_SQ_DESC_SIZE;
	rdma_svc_cap->wqebb_size = SSS_WQEBB_SIZE;
	rdma_svc_cap->max_cqe = SSS_RDMA_MAX_CQE;
	rdma_svc_cap->reserved_cq = SSS_RDMA_RSVD_CQ;
	rdma_svc_cap->cqc_entry_size = SSS_RDMA_CQC_ENTRY_SIZE;
	rdma_svc_cap->cqe_size = SSS_RDMA_CQE_SIZE;
	rdma_svc_cap->reserved_mrw = SSS_RDMA_RSVD_MRW;
	rdma_svc_cap->mpt_entry_size = SSS_RDMA_MPT_ENTRY_SIZE;
	rdma_svc_cap->max_fmr_map = 0xff;
	rdma_svc_cap->mtt_num = SSS_RDMA_MTT_NUM;
	rdma_svc_cap->log_mtt_seg = SSS_LOG_MTT_SEG;
	rdma_svc_cap->mtt_entry_size = SSS_MTT_ENTRY_SIZE;
	rdma_svc_cap->log_rdmarc_seg = SSS_LOG_RDMARC_SEG;
	rdma_svc_cap->local_ca_ack_delay = SSS_LOCAL_ACK_DELAY;
	rdma_svc_cap->port_num = SSS_RDMA_PORT_NUM;
	rdma_svc_cap->db_page_size = SSS_DB_PAGE_SIZE_K;
	rdma_svc_cap->direct_wqe_size = SSS_DWQE_SIZE;
	rdma_svc_cap->pd_num = SSS_PD_NUM;
	rdma_svc_cap->reserved_pd = SSS_RSVD_PD;
	rdma_svc_cap->max_xrcd = SSS_MAX_XRCD;
	rdma_svc_cap->reserved_xrcd = SSS_RSVD_XRCD;
	rdma_svc_cap->max_gid_per_port = SSS_MAX_GID_PER_PORT;
	rdma_svc_cap->gid_entry_size = SSS_GID_ENTRY_SIZE;
	rdma_svc_cap->reserved_lkey = SSS_RSVD_LKEY;
	/* One completion vector per available CEQ. */
	rdma_svc_cap->comp_vector_num = (u32)hwdev->mgmt_info->eq_info.ceq_num;
	rdma_svc_cap->page_size_cap = SSS_PAGE_SIZE_CAP;
	rdma_svc_cap->flag = (SSS_RDMA_BMME_FLAG_LOCAL_INV |
			      SSS_RDMA_BMME_FLAG_REMOTE_INV |
			      SSS_RDMA_BMME_FLAG_FAST_REG_WR |
			      SSS_RDMA_DEV_CAP_FLAG_XRC |
			      SSS_RDMA_DEV_CAP_FLAG_MEM_WINDOW |
			      SSS_RDMA_BMME_FLAG_TYPE_2_WIN |
			      SSS_RDMA_BMME_FLAG_WIN_TYPE_2B |
			      SSS_RDMA_DEV_CAP_FLAG_ATOMIC);
	rdma_svc_cap->max_frpl_len = SSS_MAX_FRPL_LEN;
	rdma_svc_cap->max_pkey = SSS_MAX_PKEY;
}
/*
 * Parse the RoCE capability section. When firmware reports max_qp == 0,
 * fall back to the driver defaults (halved for VFs), then initialize
 * the remaining fixed RDMA parameters.
 */
static void sss_parse_roce_cap(struct sss_hwdev *hwdev,
			       struct sss_service_cap *svc_cap,
			       struct sss_cmd_dev_cap_cfg *cmd_cap,
			       enum sss_func_type type)
{
	struct sss_dev_roce_svc_own_cap *roce_own_cap =
		&svc_cap->rdma_cap.dev_rdma_cap.roce_own_cap;

	if (!SSS_IS_ROCE_TYPE(hwdev))
		return;

	roce_own_cap->max_srq = cmd_cap->roce_max_srq;
	roce_own_cap->max_cq = cmd_cap->roce_max_cq;
	roce_own_cap->max_qp = cmd_cap->roce_max_qp;
	roce_own_cap->max_mpt = cmd_cap->roce_max_mpt;
	roce_own_cap->max_drc_qp = cmd_cap->roce_max_drc_qp;
	roce_own_cap->wqe_cl_size = cmd_cap->roce_wqe_cl_size;
	roce_own_cap->wqe_cl_start = cmd_cap->roce_wqe_cl_start;
	roce_own_cap->wqe_cl_end = cmd_cap->roce_wqe_cl_end;
	/* Firmware gave no QP budget: use driver defaults (VFs get half). */
	if (roce_own_cap->max_qp == 0) {
		roce_own_cap->max_drc_qp = SSS_ROCE_MAX_DRC_QP;
		if (type == SSS_FUNC_TYPE_PF || type == SSS_FUNC_TYPE_PPF) {
			roce_own_cap->max_srq = SSS_ROCE_MAX_SRQ;
			roce_own_cap->max_cq = SSS_ROCE_MAX_CQ;
			roce_own_cap->max_qp = SSS_ROCE_MAX_QP;
			roce_own_cap->max_mpt = SSS_ROCE_MAX_MPT;
		} else {
			roce_own_cap->max_srq = SSS_ROCE_MAX_SRQ / 2;
			roce_own_cap->max_cq = SSS_ROCE_MAX_CQ / 2;
			roce_own_cap->max_qp = SSS_ROCE_MAX_QP / 2;
			roce_own_cap->max_mpt = SSS_ROCE_MAX_MPT / 2;
		}
	}
	sss_init_rdma_cap_param(hwdev);
	sdk_info(hwdev->dev_hdl, "Get ROCE capbility, type: 0x%x\n", type);
	sdk_info(hwdev->dev_hdl,
		 "max_qps: 0x%x, max_srq: 0x%x, max_cq: 0x%x, max_mpt: 0x%x, max_drct: 0x%x\n",
		 roce_own_cap->max_qp, roce_own_cap->max_srq, roce_own_cap->max_cq,
		 roce_own_cap->max_mpt, roce_own_cap->max_drc_qp);
	sdk_info(hwdev->dev_hdl, "wqe_start: 0x%x, wqe_end: 0x%x, wqe_sz: 0x%x\n",
		 roce_own_cap->wqe_cl_start, roce_own_cap->wqe_cl_end, roce_own_cap->wqe_cl_size);
}
/*
 * Parse the generic RDMA (MTT cacheline window) capabilities and set
 * the fixed MTT/MPT sizing parameters.
 */
static void sss_parse_rdma_cap(struct sss_hwdev *hwdev,
			       struct sss_service_cap *svc_cap,
			       struct sss_cmd_dev_cap_cfg *cmd_cap,
			       enum sss_func_type type)
{
	struct sss_rdma_service_cap *rdma_svc_cap = &svc_cap->rdma_cap;
	struct sss_dev_roce_svc_own_cap *roce_own_cap =
		&rdma_svc_cap->dev_rdma_cap.roce_own_cap;

	if (!SSS_IS_RDMA_ENABLE(hwdev))
		return;

	roce_own_cap->dmtt_cl_start = cmd_cap->roce_dmtt_cl_start;
	roce_own_cap->dmtt_cl_end = cmd_cap->roce_dmtt_cl_end;
	roce_own_cap->dmtt_cl_size = cmd_cap->roce_dmtt_cl_size;
	roce_own_cap->cmtt_cl_start = cmd_cap->roce_cmtt_cl_start;
	roce_own_cap->cmtt_cl_end = cmd_cap->roce_cmtt_cl_end;
	roce_own_cap->cmtt_cl_size = cmd_cap->roce_cmtt_cl_size;
	rdma_svc_cap->log_mtt = SSS_LOG_MTT_SEG;
	rdma_svc_cap->log_mtt_seg = SSS_LOG_MTT_SEG;
	rdma_svc_cap->mtt_entry_size = SSS_MTT_ENTRY_SIZE;
	rdma_svc_cap->mpt_entry_size = SSS_RDMA_MPT_ENTRY_SIZE;
	rdma_svc_cap->mtt_num = SSS_RDMA_MTT_NUM;
	sdk_info(hwdev->dev_hdl, "Get RDMA capbility, type: 0x%x\n", type);
	sdk_info(hwdev->dev_hdl, "cmtt_cl_start: 0x%x, cmtt_cl_end: 0x%x, cmtt_cl_size: 0x%x\n",
		 roce_own_cap->cmtt_cl_start, roce_own_cap->cmtt_cl_end,
		 roce_own_cap->cmtt_cl_size);
	sdk_info(hwdev->dev_hdl, "dmtt_cl_start: 0x%x, dmtt_cl_end: 0x%x, dmtt_cl_size: 0x%x\n",
		 roce_own_cap->dmtt_cl_start, roce_own_cap->dmtt_cl_end,
		 roce_own_cap->dmtt_cl_size);
}
/* Parse the OVS capability section (QPC and pseudo-VF resources). */
static void sss_parse_ovs_cap(struct sss_hwdev *hwdev,
			      struct sss_service_cap *svc_cap,
			      struct sss_cmd_dev_cap_cfg *cmd_cap,
			      enum sss_func_type type)
{
	struct sss_ovs_service_cap *ovs_cap = &svc_cap->ovs_cap;
	struct sss_dev_ovs_svc_cap *dev_ovs_cap = &ovs_cap->dev_ovs_cap;

	if (!SSS_IS_OVS_TYPE(hwdev))
		return;

	dev_ovs_cap->max_pctx = cmd_cap->ovs_max_qpc;
	dev_ovs_cap->pseudo_vf_start_id = cmd_cap->pseudo_vf_start_id;
	dev_ovs_cap->pseudo_vf_num = cmd_cap->pseudo_vf_num;
	dev_ovs_cap->pseudo_vf_max_pctx = cmd_cap->pseudo_vf_max_pctx;
	dev_ovs_cap->dynamic_qp_en = cmd_cap->flexq_en;
	ovs_cap->pctx_size = SSS_OVS_PCTX_SIZE;
	sdk_info(hwdev->dev_hdl, "Get OVS capbility, type: 0x%x\n", type);
	sdk_info(hwdev->dev_hdl, "max_pctxs: 0x%x, pseudo_vf_start_id: 0x%x, pseudo_vf_num: 0x%x\n",
		 dev_ovs_cap->max_pctx, dev_ovs_cap->pseudo_vf_start_id,
		 dev_ovs_cap->pseudo_vf_num);
	sdk_info(hwdev->dev_hdl, "pseudo_vf_max_pctx: 0x%x, dynamic_qp_en: 0x%x\n",
		 dev_ovs_cap->pseudo_vf_max_pctx, dev_ovs_cap->dynamic_qp_en);
}
/* Parse the PPA capability section; bloom filter is enabled whenever
 * firmware reports a non-zero filter length.
 */
static void sss_parse_ppa_cap(struct sss_hwdev *hwdev,
			      struct sss_service_cap *svc_cap,
			      struct sss_cmd_dev_cap_cfg *cmd_cap,
			      enum sss_func_type type)
{
	struct sss_ppa_service_cap *ppa_cap = &svc_cap->ppa_cap;

	if (!SSS_IS_PPA_TYPE(hwdev))
		return;

	ppa_cap->qpc_pseudo_vf_start = cmd_cap->pseudo_vf_start_id;
	ppa_cap->qpc_pseudo_vf_num = cmd_cap->pseudo_vf_num;
	ppa_cap->qpc_pseudo_vf_ctx_num = cmd_cap->pseudo_vf_max_pctx;
	ppa_cap->bloomfilter_len = cmd_cap->pseudo_vf_bfilter_len;
	ppa_cap->bloomfilter_en = !!cmd_cap->pseudo_vf_bfilter_len;
	ppa_cap->pctx_size = SSS_PPA_PCTX_SIZE;
	sdk_info(hwdev->dev_hdl, "Get PPA capbility, type: 0x%x\n", type);
	sdk_info(hwdev->dev_hdl,
		 "qpc_pseudo_vf_start: 0x%x, qpc_pseudo_vf_num: 0x%x, qpc_pseudo_vf_ctx_num: 0x%x\n",
		 ppa_cap->qpc_pseudo_vf_start, ppa_cap->qpc_pseudo_vf_num,
		 ppa_cap->qpc_pseudo_vf_ctx_num);
}
/* Parse the TCP-offload-engine capability section. */
static void sss_parse_toe_cap(struct sss_hwdev *hwdev,
			      struct sss_service_cap *svc_cap,
			      struct sss_cmd_dev_cap_cfg *cmd_cap,
			      enum sss_func_type type)
{
	struct sss_toe_service_cap *toe_svc_cap = &svc_cap->toe_cap;
	struct sss_dev_toe_svc_cap *dev_toe_cap = &toe_svc_cap->dev_toe_cap;

	if (!SSS_IS_TOE_TYPE(hwdev))
		return;

	dev_toe_cap->max_srq = cmd_cap->toe_max_srq;
	dev_toe_cap->max_cq = cmd_cap->toe_max_cq;
	dev_toe_cap->srq_id_start = cmd_cap->toe_srq_id_start;
	dev_toe_cap->max_pctx = cmd_cap->toe_max_pctx;
	dev_toe_cap->max_cctxt = cmd_cap->toe_max_cctxt;
	dev_toe_cap->max_mpt = cmd_cap->toe_max_mpt;
	/* Fixed context sizes defined by the driver. */
	toe_svc_cap->pctx_size = SSS_TOE_PCTX_SIZE;
	toe_svc_cap->scqc_size = SSS_TOE_SCQC_SIZE;
	sdk_info(hwdev->dev_hdl, "Get TOE capbility, type: 0x%x\n", type);
	sdk_info(hwdev->dev_hdl,
		 "max_pctx: 0x%x, max_cq: 0x%x, max_srq: 0x%x, srq_id_start: 0x%x, max_mpt: 0x%x\n",
		 dev_toe_cap->max_pctx, dev_toe_cap->max_cq, dev_toe_cap->max_srq,
		 dev_toe_cap->srq_id_start, dev_toe_cap->max_mpt);
}
/* Parse the IPsec capability section (SA context and CQ limits). */
static void sss_parse_ipsec_cap(struct sss_hwdev *hwdev,
				struct sss_service_cap *svc_cap,
				struct sss_cmd_dev_cap_cfg *cmd_cap,
				enum sss_func_type type)
{
	struct sss_ipsec_service_cap *ipsec_cap = &svc_cap->ipsec_cap;
	struct sss_dev_ipsec_svc_cap *dev_ipsec_cap = &ipsec_cap->dev_ipsec_cap;

	if (!SSS_IS_IPSEC_TYPE(hwdev))
		return;

	dev_ipsec_cap->max_sactx = cmd_cap->ipsec_max_sactx;
	dev_ipsec_cap->max_cq = cmd_cap->ipsec_max_cq;
	ipsec_cap->sactx_size = SSS_IPSEC_SACTX_SIZE;
	sdk_info(hwdev->dev_hdl, "Get IPSEC capbility, type: 0x%x\n", type);
	sdk_info(hwdev->dev_hdl, "max_sactx: 0x%x, max_cq: 0x%x\n",
		 dev_ipsec_cap->max_sactx, dev_ipsec_cap->max_cq);
}
/* Cache the VBS volume-queue capability when the VBS service is enabled. */
static void sss_parse_vbs_cap(struct sss_hwdev *hwdev,
			      struct sss_service_cap *svc_cap,
			      struct sss_cmd_dev_cap_cfg *cmd_cap,
			      enum sss_func_type type)
{
	struct sss_vbs_service_cap *cap = &svc_cap->vbs_cap;

	if (!SSS_IS_VBS_TYPE(hwdev))
		return;

	cap->vbs_max_volq = cmd_cap->vbs_max_volq;

	sdk_info(hwdev->dev_hdl, "Get VBS capbility, type: 0x%x, vbs_max_volq: 0x%x\n",
		 type, cap->vbs_max_volq);
}
/*
 * Dispatch the raw firmware capability reply to every per-service parser.
 * Each parser is a no-op unless the corresponding service type is enabled.
 */
static void sss_parse_dev_cap(struct sss_hwdev *hwdev,
			      struct sss_cmd_dev_cap_cfg *cmd_cap, enum sss_func_type type)
{
	struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap;

	sss_parse_pubic_cap(hwdev, svc_cap, cmd_cap, type);
	sss_parse_l2nic_cap(hwdev, svc_cap, cmd_cap, type);
	sss_parse_fc_cap(hwdev, svc_cap, cmd_cap, type);
	sss_parse_toe_cap(hwdev, svc_cap, cmd_cap, type);
	sss_parse_rdma_cap(hwdev, svc_cap, cmd_cap, type);
	sss_parse_roce_cap(hwdev, svc_cap, cmd_cap, type);
	sss_parse_ovs_cap(hwdev, svc_cap, cmd_cap, type);
	sss_parse_ipsec_cap(hwdev, svc_cap, cmd_cap, type);
	sss_parse_ppa_cap(hwdev, svc_cap, cmd_cap, type);
	sss_parse_vbs_cap(hwdev, svc_cap, cmd_cap, type);
}
/*
 * Fetch the raw capability blob for this function from the MPU over
 * the CFGM mailbox. @cmd_cap is used as both request and reply buffer.
 * Return: 0 on success, -EIO on channel failure.
 */
static int sss_chip_get_cap(struct sss_hwdev *hwdev, struct sss_cmd_dev_cap_cfg *cmd_cap)
{
	u16 reply_len = sizeof(*cmd_cap);
	int err;

	cmd_cap->func_id = sss_get_global_func_id(hwdev);
	sdk_info(hwdev->dev_hdl, "Get svc_cap, func_id: %u\n", cmd_cap->func_id);

	err = sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_CFGM, SSS_CFG_CMD_GET_CAP_CFG,
				    cmd_cap, sizeof(*cmd_cap), cmd_cap, &reply_len, 0,
				    SSS_CHANNEL_COMM);
	if (SSS_ASSERT_SEND_MSG_RETURN(err, reply_len, cmd_cap)) {
		sdk_err(hwdev->dev_hdl,
			"Fail to get capability, err: %d, status: 0x%x, out_len: 0x%x\n",
			err, cmd_cap->head.state, reply_len);
		return -EIO;
	}

	return 0;
}
int sss_init_capability(struct sss_hwdev *hwdev)
{
int ret;
enum sss_func_type type = SSS_GET_FUNC_TYPE(hwdev);
struct sss_cmd_dev_cap_cfg cmd_cap = {0};
if (type != SSS_FUNC_TYPE_PF &&
type != SSS_FUNC_TYPE_VF &&
type != SSS_FUNC_TYPE_PPF) {
sdk_err(hwdev->dev_hdl, "Unsupported PCI Function type: %d\n", type);
return -EINVAL;
}
ret = sss_chip_get_cap(hwdev, &cmd_cap);
if (ret != 0)
return ret;
sss_parse_dev_cap(hwdev, &cmd_cap, type);
sdk_info(hwdev->dev_hdl, "Success to init capability\n");
return 0;
}
/*
 * sss_deinit_capability() - teardown counterpart of sss_init_capability().
 * No resources are allocated by capability init, so this only logs.
 */
void sss_deinit_capability(struct sss_hwdev *hwdev)
{
	/* Fix: terminate the log line with '\n' — without it the printk line
	 * stays open and may be merged with the next message; every other
	 * sdk_info in this file is newline-terminated.
	 */
	sdk_info(hwdev->dev_hdl, "Success to deinit capability\n");
}

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWDEV_CAP_H
#define SSS_HWDEV_CAP_H
#include "sss_hwdev.h"
/* Query device capabilities from firmware and cache them in @dev. */
int sss_init_capability(struct sss_hwdev *dev);
/* Teardown counterpart of sss_init_capability() (currently log-only). */
void sss_deinit_capability(struct sss_hwdev *dev);
#endif

View File

@ -0,0 +1,599 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_csr.h"
#include "sss_hwif_api.h"
#include "sss_hw_svc_cap.h"
/* Fallback level index (2K) when a size is not in the lookup table. */
#define SSS_DEFAULT_RX_BUF_SIZE_LEVEL ((u16)0xB)
/* Supported RX buffer sizes in bytes; each enumerator's position in
 * sss_rx_buf_size_level[] below is the level code reported to firmware
 * by sss_get_rx_buf_size_level().
 */
enum sss_rx_buf_size {
	SSS_RX_BUF_SIZE_32B = 0x20,
	SSS_RX_BUF_SIZE_64B = 0x40,
	SSS_RX_BUF_SIZE_96B = 0x60,
	SSS_RX_BUF_SIZE_128B = 0x80,
	SSS_RX_BUF_SIZE_192B = 0xC0,
	SSS_RX_BUF_SIZE_256B = 0x100,
	SSS_RX_BUF_SIZE_384B = 0x180,
	SSS_RX_BUF_SIZE_512B = 0x200,
	SSS_RX_BUF_SIZE_768B = 0x300,
	SSS_RX_BUF_SIZE_1K = 0x400,
	SSS_RX_BUF_SIZE_1_5K = 0x600,
	SSS_RX_BUF_SIZE_2K = 0x800,
	SSS_RX_BUF_SIZE_3K = 0xC00,
	SSS_RX_BUF_SIZE_4K = 0x1000,
	SSS_RX_BUF_SIZE_8K = 0x2000,
	SSS_RX_BUF_SIZE_16K = 0x4000,
};
/* Level-index -> RX buffer size (bytes) lookup table; order must stay
 * in sync with SSS_DEFAULT_RX_BUF_SIZE_LEVEL (index 0xB == 2K).
 */
const int sss_rx_buf_size_level[] = {
	SSS_RX_BUF_SIZE_32B,
	SSS_RX_BUF_SIZE_64B,
	SSS_RX_BUF_SIZE_96B,
	SSS_RX_BUF_SIZE_128B,
	SSS_RX_BUF_SIZE_192B,
	SSS_RX_BUF_SIZE_256B,
	SSS_RX_BUF_SIZE_384B,
	SSS_RX_BUF_SIZE_512B,
	SSS_RX_BUF_SIZE_768B,
	SSS_RX_BUF_SIZE_1K,
	SSS_RX_BUF_SIZE_1_5K,
	SSS_RX_BUF_SIZE_2K,
	SSS_RX_BUF_SIZE_3K,
	SSS_RX_BUF_SIZE_4K,
	SSS_RX_BUF_SIZE_8K,
	SSS_RX_BUF_SIZE_16K,
};
/* Map an RX buffer size in bytes to its level index in
 * sss_rx_buf_size_level[]; unknown sizes fall back to the 2K level.
 */
static u16 sss_get_rx_buf_size_level(int buf_size)
{
	u16 num_level = ARRAY_LEN(sss_rx_buf_size_level);
	u16 level;

	for (level = 0; level < num_level; level++) {
		if (sss_rx_buf_size_level[level] == buf_size)
			return level;
	}

	return SSS_DEFAULT_RX_BUF_SIZE_LEVEL; /* default 2K */
}
/*
 * Read the current MSI-X entry configuration (LLI, coalescing and
 * resend timers) for intr_cfg->msix_id from the chip into @intr_cfg.
 * Return: 0 on success, -EINVAL on management-channel failure.
 */
static int sss_chip_get_interrupt_cfg(void *hwdev,
				      struct sss_irq_cfg *intr_cfg, u16 channel)
{
	int ret;
	struct sss_cmd_msix_config cmd_msix = {0};
	u16 out_len = sizeof(cmd_msix);

	cmd_msix.opcode = SSS_MGMT_MSG_GET_CMD;
	cmd_msix.func_id = sss_get_global_func_id(hwdev);
	cmd_msix.msix_index = intr_cfg->msix_id;
	ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_CFG_MSIX_CTRL_REG,
				   &cmd_msix, sizeof(cmd_msix), &cmd_msix, &out_len, channel);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_msix)) {
		sdk_err(SSS_TO_DEV(hwdev),
			"Fail to get intr config, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n",
			ret, cmd_msix.head.state, out_len, channel);
		return -EINVAL;
	}
	/* Hand back the values firmware currently has programmed. */
	intr_cfg->lli_credit = cmd_msix.lli_credit_cnt;
	intr_cfg->lli_timer = cmd_msix.lli_timer_cnt;
	intr_cfg->pending = cmd_msix.pending_cnt;
	intr_cfg->coalesc_timer = cmd_msix.coalesce_timer_cnt;
	intr_cfg->resend_timer = cmd_msix.resend_timer_cnt;
	return 0;
}
/*
 * Program the MSI-X entry attributes. @intr_cfg is intentionally passed
 * by value: fields the caller did not mark as set (lli_set /
 * coalesc_intr_set == 0) are filled in from the chip's current values
 * before writing back.
 * Return: 0 on success, -EINVAL on failure.
 */
int sss_chip_set_msix_attr(void *hwdev,
			   struct sss_irq_cfg intr_cfg, u16 channel)
{
	int ret;
	struct sss_irq_cfg temp_cfg = {0};

	if (!hwdev)
		return -EINVAL;

	temp_cfg.msix_id = intr_cfg.msix_id;
	ret = sss_chip_get_interrupt_cfg(hwdev, &temp_cfg, channel);
	if (ret != 0)
		return -EINVAL;
	/* Preserve current LLI values unless the caller set them. */
	if (intr_cfg.lli_set == 0) {
		intr_cfg.lli_credit = temp_cfg.lli_credit;
		intr_cfg.lli_timer = temp_cfg.lli_timer;
	}
	/* Preserve current coalescing values unless the caller set them. */
	if (intr_cfg.coalesc_intr_set == 0) {
		intr_cfg.pending = temp_cfg.pending;
		intr_cfg.coalesc_timer = temp_cfg.coalesc_timer;
		intr_cfg.resend_timer = temp_cfg.resend_timer;
	}
	return sss_chip_set_eq_msix_attr(hwdev, &intr_cfg, channel);
}
EXPORT_SYMBOL(sss_chip_set_msix_attr);
/* Write the MSI clear indirection CSR for @msix_id; @clear_en selects
 * whether the resend-timer-clear bit is set in the written value.
 */
void sss_chip_clear_msix_resend_bit(void *hwdev, u16 msix_id, bool clear_en)
{
	u32 csr_val;

	if (!hwdev)
		return;

	csr_val = SSS_SET_MSI_CLR_INDIR(msix_id, SIMPLE_INDIR_ID) |
		  SSS_SET_MSI_CLR_INDIR(!!clear_en, RESEND_TIMER_CLR);
	sss_chip_write_reg(SSS_TO_HWIF(hwdev), SSS_CSR_FUNC_MSI_CLR_WR_ADDR, csr_val);
}
EXPORT_SYMBOL(sss_chip_clear_msix_resend_bit);
/*
 * Ask firmware to reset function @func_id; @flag selects which resource
 * groups are reset.
 * Return: 0 on success, -EINVAL for a NULL hwdev, -EIO on channel failure.
 */
int sss_chip_reset_function(void *hwdev, u16 func_id, u64 flag, u16 channel)
{
	int ret = 0;
	struct sss_cmd_func_reset cmd_reset = {0};
	u16 out_len = sizeof(cmd_reset);

	if (!hwdev)
		return -EINVAL;

	cmd_reset.func_id = func_id;
	cmd_reset.reset_flag = flag;
	sdk_info(SSS_TO_DEV(hwdev), "Func reset, flag: 0x%llx, channel:0x%x\n", flag, channel);
	ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_FUNC_RESET,
				   &cmd_reset, sizeof(cmd_reset), &cmd_reset, &out_len, channel);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_reset)) {
		sdk_err(SSS_TO_DEV(hwdev),
			"Fail to reset func, flag 0x%llx, ret: %d, status: 0x%x, out_len: 0x%x\n",
			flag, ret, cmd_reset.head.state, out_len);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(sss_chip_reset_function);
/* Program the root (VAT) context for this function.
 * Depths are sent as log2 values. Passing all zeros (see
 * sss_chip_clean_root_ctx()) clears the context instead of setting it.
 */
int sss_chip_set_root_ctx(void *hwdev,
			  u32 rq_depth, u32 sq_depth, int rx_size, u16 channel)
{
	int ret;
	struct sss_cmd_root_ctxt cmd_root = {0};
	u16 out_len = sizeof(cmd_root);

	if (!hwdev)
		return -EINVAL;

	cmd_root.func_id = sss_get_global_func_id(hwdev);
	/* Any non-zero parameter means "set"; all-zero means "clean". */
	if (rq_depth != 0 || sq_depth != 0 || rx_size != 0) {
		cmd_root.rx_buf_sz = sss_get_rx_buf_size_level(rx_size);
		cmd_root.rq_depth = (u16)ilog2(rq_depth);
		cmd_root.sq_depth = (u16)ilog2(sq_depth);
		cmd_root.lro_en = 1;
	}

	ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_SET_VAT,
				   &cmd_root, sizeof(cmd_root), &cmd_root, &out_len, channel);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_root)) {
		sdk_err(SSS_TO_DEV(hwdev),
			"Fail to set root ctx, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n",
			ret, cmd_root.head.state, out_len, channel);
		return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL(sss_chip_set_root_ctx);
/* Clear this function's root context (all-zero depths/buffer size). */
int sss_chip_clean_root_ctx(void *hwdev, u16 channel)
{
	return sss_chip_set_root_ctx(hwdev, 0, 0, 0, channel);
}
EXPORT_SYMBOL(sss_chip_clean_root_ctx);
/* Query a firmware version string of @fw_type and copy it into @buf
 * (NUL-terminated, truncated to @buf_size). Returns 0 on success,
 * -EINVAL on bad args, -EIO on a failed management exchange.
 */
static int sss_get_fw_ver(struct sss_hwdev *hwdev,
			  enum sss_fw_ver_type fw_type, u8 *buf, u8 buf_size, u16 channel)
{
	struct sss_cmd_get_fw_version cmd_version = {0};
	u16 out_len = sizeof(cmd_version);
	int ret;

	if (!hwdev || !buf)
		return -EINVAL;

	cmd_version.fw_type = fw_type;
	ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_GET_FW_VERSION,
				   &cmd_version, sizeof(cmd_version), &cmd_version,
				   &out_len, channel);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_version)) {
		sdk_err(hwdev->dev_hdl,
			"Fail to get fw version, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n",
			ret, cmd_version.head.state, out_len, channel);
		return -EIO;
	}

	ret = snprintf(buf, buf_size, "%s", cmd_version.ver);

	return (ret < 0) ? -EINVAL : 0;
}
/* Public wrapper: fetch the MPU (management CPU) firmware version. */
int sss_get_mgmt_version(void *hwdev, u8 *buf, u8 buf_size, u16 channel)
{
	return sss_get_fw_ver(hwdev, SSS_FW_VER_TYPE_MPU, buf,
			      buf_size, channel);
}
EXPORT_SYMBOL(sss_get_mgmt_version);
/* Tell the firmware whether @service_type is in use on this function.
 * Returns 0 on success, -EINVAL on bad args, -EIO on a failed exchange.
 */
int sss_chip_set_func_used_state(void *hwdev,
				 u16 service_type, bool state, u16 channel)
{
	int ret;
	struct sss_cmd_func_svc_used_state cmd_state = {0};
	u16 out_len = sizeof(cmd_state);

	if (!hwdev)
		return -EINVAL;

	cmd_state.func_id = sss_get_global_func_id(hwdev);
	cmd_state.svc_type = service_type;
	cmd_state.used_state = !!state;

	ret = sss_sync_send_msg_ch(hwdev,
				   SSS_COMM_MGMT_CMD_SET_FUNC_SVC_USED_STATE,
				   &cmd_state, sizeof(cmd_state), &cmd_state, &out_len, channel);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_state)) {
		/* Fix: log line previously ended with a stray double "\n\n". */
		sdk_err(SSS_TO_DEV(hwdev),
			"Fail to set func used state, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n",
			ret, cmd_state.head.state, out_len, channel);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL(sss_chip_set_func_used_state);
/* Copy the NIC service capability into @capability.
 * Returns false when arguments are invalid or the device does not
 * advertise the NIC service.
 */
bool sss_get_nic_capability(void *hwdev, struct sss_nic_service_cap *capability)
{
	struct sss_hwdev *dev = hwdev;

	if (!capability || !hwdev)
		return false;

	if (!SSS_IS_NIC_TYPE(dev))
		return false;

	memcpy(capability, SSS_TO_NIC_CAP(hwdev), sizeof(*capability));

	return true;
}
EXPORT_SYMBOL(sss_get_nic_capability);
bool sss_support_nic(void *hwdev)
{
return (hwdev && SSS_IS_NIC_TYPE((struct sss_hwdev *)hwdev));
}
EXPORT_SYMBOL(sss_support_nic);
/* True when the device advertises the PPA service; when @cap is
 * non-NULL the PPA capability is copied out as well.
 */
bool sss_support_ppa(void *hwdev, struct sss_ppa_service_cap *cap)
{
	struct sss_hwdev *dev = hwdev;

	if (!dev || !SSS_IS_PPA_TYPE(dev))
		return false;

	if (cap)
		memcpy(cap, &dev->mgmt_info->svc_cap.ppa_cap, sizeof(*cap));

	return true;
}
EXPORT_SYMBOL(sss_support_ppa);
/* Maximum number of send queues for this device; 0 if @hwdev is NULL. */
u16 sss_get_max_sq_num(void *hwdev)
{
	if (hwdev)
		return SSS_TO_MAX_SQ_NUM(hwdev);

	pr_err("Get max sq num: hwdev is NULL\n");
	return 0;
}
EXPORT_SYMBOL(sss_get_max_sq_num);
/* Physical port id bound to this function; 0 if @hwdev is NULL. */
u8 sss_get_phy_port_id(void *hwdev)
{
	if (hwdev)
		return SSS_TO_PHY_PORT_ID(hwdev);

	pr_err("Get phy port id: hwdev is NULL\n");
	return 0;
}
EXPORT_SYMBOL(sss_get_phy_port_id);
/* Maximum number of VFs supported; 0 if @hwdev is NULL. */
u16 sss_get_max_vf_num(void *hwdev)
{
	if (hwdev)
		return SSS_TO_MAX_VF_NUM(hwdev);

	pr_err("Get max vf num: hwdev is NULL\n");
	return 0;
}
EXPORT_SYMBOL(sss_get_max_vf_num);
/* Number of interrupt vectors provisioned for this function. */
u16 sss_nic_intr_num(void *hwdev)
{
	struct sss_hwdev *dev = hwdev;

	if (!dev)
		return 0;

	return dev->hwif->attr.irq_num;
}
EXPORT_SYMBOL(sss_nic_intr_num);
/* Report the valid CoS bitmaps for the function and the port.
 * Returns 0 on success, -EINVAL when @hwdev or either output pointer
 * is NULL.
 */
int sss_get_cos_valid_bitmap(void *hwdev, u8 *func_cos_bitmap, u8 *port_cos_bitmap)
{
	if (!hwdev) {
		pr_err("Get cos valid bitmap: hwdev is NULL\n");
		return -EINVAL;
	}

	/* Fix: the output pointers were dereferenced unchecked. */
	if (!func_cos_bitmap || !port_cos_bitmap)
		return -EINVAL;

	*func_cos_bitmap = SSS_TO_FUNC_COS_BITMAP(hwdev);
	*port_cos_bitmap = SSS_TO_PORT_COS_BITMAP(hwdev);

	return 0;
}
EXPORT_SYMBOL(sss_get_cos_valid_bitmap);
/* Allocate up to @alloc_num free interrupt vectors for @service_type.
 * Fills @alloc_array with the irq/msix ids of the vectors grabbed and
 * returns the number actually allocated, which may be fewer than
 * requested when the free pool is short. Returns 0 on bad arguments or
 * when no vector is free. Serialized by irq_info->irq_mutex.
 */
u16 sss_alloc_irq(void *hwdev, enum sss_service_type service_type,
		  struct sss_irq_desc *alloc_array, u16 alloc_num)
{
	int i;
	int j;
	u16 need_num = alloc_num;
	u16 act_num = 0;
	struct sss_irq_info *irq_info = NULL;
	struct sss_irq *irq = NULL;

	if (!hwdev || !alloc_array)
		return 0;

	irq_info = SSS_TO_IRQ_INFO(hwdev);
	irq = irq_info->irq;
	mutex_lock(&irq_info->irq_mutex);
	if (irq_info->free_num == 0) {
		sdk_err(SSS_TO_DEV(hwdev), "Fail to alloc irq, free_num is zero\n");
		mutex_unlock(&irq_info->irq_mutex);
		return 0;
	}

	/* Clamp the request to what the pool can actually provide. */
	if (alloc_num > irq_info->free_num) {
		sdk_warn(SSS_TO_DEV(hwdev), "Adjust need_num to %u\n", irq_info->free_num);
		need_num = irq_info->free_num;
	}

	for (i = 0; i < need_num; i++) {
		/* Linear scan for the next free entry in the table. */
		for (j = 0; j < irq_info->total_num; j++) {
			if (irq[j].busy != SSS_CFG_FREE)
				continue;

			/* Defensive re-check; on exhaustion mid-scan the
			 * partial result is wiped and 0 is returned.
			 */
			if (irq_info->free_num == 0) {
				sdk_err(SSS_TO_DEV(hwdev), "Fail to alloc irq, free_num is zero\n");
				mutex_unlock(&irq_info->irq_mutex);
				memset(alloc_array, 0, sizeof(*alloc_array) * alloc_num);
				return 0;
			}

			irq[j].type = service_type;
			irq[j].busy = SSS_CFG_BUSY;
			alloc_array[i].irq_id = irq[j].desc.irq_id;
			alloc_array[i].msix_id = irq[j].desc.msix_id;
			irq_info->free_num--;
			act_num++;
			break;
		}
	}
	mutex_unlock(&irq_info->irq_mutex);
	return act_num;
}
EXPORT_SYMBOL(sss_alloc_irq);
/* Return the vector identified by (@service_type, @irq_id) to the free
 * pool. Warns when no matching busy entry is found. Serialized by
 * irq_info->irq_mutex.
 */
void sss_free_irq(void *hwdev, enum sss_service_type service_type, u32 irq_id)
{
	int i;
	struct sss_irq_info *irq_info = NULL;
	struct sss_irq *irq = NULL;

	if (!hwdev)
		return;

	irq_info = SSS_TO_IRQ_INFO(hwdev);
	irq = irq_info->irq;
	mutex_lock(&irq_info->irq_mutex);
	for (i = 0; i < irq_info->total_num; i++) {
		if (irq_id != irq[i].desc.irq_id ||
		    service_type != irq[i].type)
			continue;

		/* Already free: keep scanning in case of a stale entry. */
		if (irq[i].busy == SSS_CFG_FREE)
			continue;

		irq[i].busy = SSS_CFG_FREE;
		irq_info->free_num++;
		/* Accounting sanity check: free count can never exceed
		 * the table size.
		 */
		if (irq_info->free_num > irq_info->total_num) {
			sdk_err(SSS_TO_DEV(hwdev), "Free_num out of range :[0, %u]\n",
				irq_info->total_num);
			mutex_unlock(&irq_info->irq_mutex);
			return;
		}
		break;
	}

	if (i >= irq_info->total_num)
		sdk_warn(SSS_TO_DEV(hwdev), "Irq %u don`t need to free\n", irq_id);

	mutex_unlock(&irq_info->irq_mutex);
}
EXPORT_SYMBOL(sss_free_irq);
/* Install the device event callback and its private context. */
void sss_register_dev_event(void *hwdev, void *data, sss_event_handler_t callback)
{
	struct sss_hwdev *dev = hwdev;

	if (!dev) {
		pr_err("Register event: hwdev is NULL\n");
		return;
	}

	dev->event_handler_data = data;
	dev->event_handler = callback;
}
EXPORT_SYMBOL(sss_register_dev_event);
/* Remove the device event callback installed by
 * sss_register_dev_event().
 */
void sss_unregister_dev_event(void *hwdev)
{
	struct sss_hwdev *dev = hwdev;

	if (!dev) {
		pr_err("Unregister event: hwdev is NULL\n");
		return;
	}

	dev->event_handler_data = NULL;
	dev->event_handler = NULL;
}
EXPORT_SYMBOL(sss_unregister_dev_event);
int sss_get_dev_present_flag(const void *hwdev)
{
return hwdev && !!((struct sss_hwdev *)hwdev)->chip_present_flag;
}
EXPORT_SYMBOL(sss_get_dev_present_flag);
/* Maximum number of PFs on the card; 0 if @hwdev is NULL. */
u8 sss_get_max_pf_num(void *hwdev)
{
	if (hwdev)
		return SSS_MAX_PF_NUM((struct sss_hwdev *)hwdev);

	return 0;
}
EXPORT_SYMBOL(sss_get_max_pf_num);
/* Query the live chip-present state into @present_state.
 * Returns -EINVAL on bad arguments, 0 otherwise.
 */
int sss_get_chip_present_state(void *hwdev, bool *present_state)
{
	if (!hwdev || !present_state)
		return -EINVAL;

	*present_state = sss_chip_get_present_state(hwdev);

	return 0;
}
EXPORT_SYMBOL(sss_get_chip_present_state);
/* Log a fault event (source/severity); no further action is taken. */
void sss_fault_event_report(void *hwdev, u16 src, u16 level)
{
	if (!hwdev)
		return;

	sdk_info(SSS_TO_DEV(hwdev),
		 "Fault event report, src: %u, level: %u\n", src, level);
}
EXPORT_SYMBOL(sss_fault_event_report);
/* Attach a service-level adapter object to @service_type.
 * Fails with -EINVAL on bad arguments or when a different adapter is
 * already registered for that slot.
 */
int sss_register_service_adapter(void *hwdev, enum sss_service_type service_type,
				 void *service_adapter)
{
	struct sss_hwdev *dev = hwdev;

	if (!dev || !service_adapter || service_type >= SSS_SERVICE_TYPE_MAX)
		return -EINVAL;

	/* One adapter per service slot. */
	if (dev->service_adapter[service_type])
		return -EINVAL;

	dev->service_adapter[service_type] = service_adapter;

	return 0;
}
EXPORT_SYMBOL(sss_register_service_adapter);
/* Detach the adapter registered for @service_type, if any. */
void sss_unregister_service_adapter(void *hwdev,
				    enum sss_service_type service_type)
{
	struct sss_hwdev *dev = hwdev;

	if (!dev || service_type >= SSS_SERVICE_TYPE_MAX)
		return;

	dev->service_adapter[service_type] = NULL;
}
EXPORT_SYMBOL(sss_unregister_service_adapter);
/* Look up the adapter registered for @service_type; NULL when unset
 * or on bad arguments.
 */
void *sss_get_service_adapter(void *hwdev, enum sss_service_type service_type)
{
	struct sss_hwdev *dev = hwdev;

	if (!dev || service_type >= SSS_SERVICE_TYPE_MAX)
		return NULL;

	return dev->service_adapter[service_type];
}
EXPORT_SYMBOL(sss_get_service_adapter);
/* Deliver @event to the registered device event handler, if one is
 * installed; otherwise just log and drop it.
 */
void sss_do_event_callback(void *hwdev, struct sss_event_info *event)
{
	struct sss_hwdev *dev = hwdev;

	if (!dev) {
		pr_err("Event callback: hwdev is NULL\n");
		return;
	}

	if (dev->event_handler) {
		dev->event_handler(dev->event_handler_data, event);
		return;
	}

	sdk_info(dev->dev_hdl, "Event callback: handler is NULL\n");
}
EXPORT_SYMBOL(sss_do_event_callback);
/* Bump the link-up or link-down event counter for @hwdev. */
void sss_update_link_stats(void *hwdev, bool link_state)
{
	struct sss_hwdev *dev = hwdev;
	atomic_t *counter;

	if (!dev)
		return;

	counter = link_state ? &dev->hw_stats.link_event_stats.link_up_stats :
			       &dev->hw_stats.link_event_stats.link_down_stats;
	atomic_inc(counter);
}
EXPORT_SYMBOL(sss_update_link_stats);

View File

@ -0,0 +1,548 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_adapter.h"
#include "sss_hwdev_api.h"
#include "sss_hwdev_mgmt_info.h"
#include "sss_hwdev_mgmt_channel.h"
#include "sss_hwdev_cap.h"
#include "sss_hwdev_link.h"
#include "sss_hwdev_io_flush.h"
#include "sss_hwif_init.h"
#include "sss_hwif_api.h"
#include "sss_hwif_export.h"
#include "sss_hwif_mgmt_init.h"
/* Host operating mode as reported in the service capability. */
enum sss_host_mode {
	SSS_HOST_MODE_NORMAL = 0,
	SSS_HOST_MODE_VM,
	SSS_HOST_MODE_BM,
	SSS_HOST_MODE_MAX,
};

/* Driver-private workqueue for heartbeat / time-sync / detect work. */
#define SSS_HWDEV_WQ_NAME "sssnic_hardware"
#define SSS_WQ_MAX_REQ 10

/* Consecutive link-down reads before the PCIe link is declared dead. */
#define SSS_DETECT_PCIE_LINK_DOWN_RETRY 2

/* AEQ busy polls tolerated before a channel is reported stuck. */
#define SSS_CHN_BUSY_TIMEOUT 25

/* First heartbeat check after 5s, then re-armed every 1s. */
#define SSS_HEARTBEAT_TIMER_EXPIRES 5000
#define SSS_HEARTBEAT_PERIOD 1000

#define SSS_GET_PCIE_LINK_STATUS(hwdev) \
	((hwdev)->heartbeat.pcie_link_down ? \
	SSS_EVENT_PCIE_LINK_DOWN : SSS_EVENT_HEART_LOST)

/* Clamp out-of-range modes back to the normal-host default. */
#define SSS_SET_FUNC_HOST_MODE(hwdev, mode) \
do { \
	if ((mode) >= SSS_FUNC_MOD_MIN && (mode) <= SSS_FUNC_MOD_MAX) { \
		(hwdev)->func_mode = (mode); \
	} else \
		(hwdev)->func_mode = SSS_FUNC_MOD_NORMAL_HOST; \
} while (0)

/* Firmware time sync runs hourly; channel detection every 5 seconds. */
#define SSS_SYNFW_TIME_PERIOD (60 * 60 * 1000)
#define SSS_CHANNEL_DETECT_PERIOD (5 * 1000)

#define SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev) \
	((hwdev)->features[0] & SSS_COMM_F_CHANNEL_DETECT)

/* Per-mode setup callback, dispatched by sss_init_host_mode(). */
typedef void (*sss_set_mode_handler_t)(struct sss_hwdev *hwdev);
/* Allocate a zeroed hwdev plus its chip-fault statistics buffer.
 * Returns NULL when either allocation fails.
 */
static struct sss_hwdev *sss_alloc_hwdev(void)
{
	struct sss_hwdev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;

	dev->chip_fault_stats = vzalloc(SSS_CHIP_FAULT_SIZE);
	if (dev->chip_fault_stats)
		return dev;

	kfree(dev);
	return NULL;
}
/* Release a hwdev allocated by sss_alloc_hwdev(). */
static void sss_free_hwdev(struct sss_hwdev *hwdev)
{
	vfree(hwdev->chip_fault_stats);

	kfree(hwdev);
}
/* Wire the new hwdev to its PCI adapter and initialize its lock. */
static void sss_init_hwdev_param(struct sss_hwdev *hwdev,
				 struct sss_pci_adapter *adapter)
{
	hwdev->adapter_hdl = adapter;
	hwdev->pcidev_hdl = adapter->pcidev;
	hwdev->dev_hdl = &adapter->pcidev->dev;
	hwdev->chip_node = adapter->chip_node;

	spin_lock_init(&hwdev->channel_lock);
}
/* Record whether the chip is considered present (1) or absent (0). */
static void sss_set_chip_present_flag(struct sss_hwdev *hwdev, bool present)
{
	hwdev->chip_present_flag = present ? 1 : 0;
}
/* Heartbeat check: decide whether the chip has failed.
 * Returns true when the PCIe link has been down for
 * SSS_DETECT_PCIE_LINK_DOWN_RETRY consecutive polls (the device is
 * then marked absent) or when the heartbeat register reports anything
 * other than link-up. Returns false while the device is healthy or
 * already marked absent.
 */
static bool sss_is_chip_abnormal(struct sss_hwdev *hwdev)
{
	u32 pcie_status;

	/* Already declared gone: nothing more to detect. */
	if (!sss_get_dev_present_flag(hwdev))
		return false;

	pcie_status = sss_chip_get_pcie_link_status(hwdev);
	if (pcie_status == SSS_PCIE_LINK_DOWN) {
		hwdev->heartbeat.pcie_link_down_cnt++;
		sdk_warn(hwdev->dev_hdl, "Pcie link down\n");
		if (hwdev->heartbeat.pcie_link_down_cnt >= SSS_DETECT_PCIE_LINK_DOWN_RETRY) {
			/* Link is definitively dead: mark absent and wake
			 * all waiters blocked on chip messages.
			 */
			sss_set_chip_present_flag(hwdev, false);
			sss_force_complete_all(hwdev);
			hwdev->heartbeat.pcie_link_down = true;
			return true;
		}

		return false;
	}

	/* Any value other than link-up/link-down means heartbeat lost. */
	if (pcie_status != SSS_PCIE_LINK_UP) {
		hwdev->heartbeat.heartbeat_lost = true;
		return true;
	}

	hwdev->heartbeat.pcie_link_down_cnt = 0;

	return false;
}
static void sss_update_aeq_stat(struct sss_hwdev *hwdev)
{
if (hwdev->aeq_stat.last_recv_cnt != hwdev->aeq_stat.cur_recv_cnt) {
hwdev->aeq_stat.last_recv_cnt = hwdev->aeq_stat.cur_recv_cnt;
hwdev->aeq_stat.busy_cnt = 0;
} else {
hwdev->aeq_stat.busy_cnt++;
}
}
/* PPF-only: detect a stuck management channel by watching AEQ
 * progress; once the busy counter exceeds SSS_CHN_BUSY_TIMEOUT the
 * card-wide timeout counter is raised (which also pauses detection).
 */
static void sss_update_channel_status(struct sss_hwdev *hwdev)
{
	struct sss_card_node *node = hwdev->chip_node;

	if (!node)
		return;

	/* Only the PPF with channel-detect support runs this, and only
	 * while no timeout has been flagged yet.
	 */
	if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_PPF ||
	    !SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev) ||
	    atomic_read(&node->channel_timeout_cnt))
		return;

	if (test_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state)) {
		sss_update_aeq_stat(hwdev);

		if (hwdev->aeq_stat.busy_cnt > SSS_CHN_BUSY_TIMEOUT) {
			sdk_err(hwdev->dev_hdl, "Detect channel busy\n");
			atomic_inc(&node->channel_timeout_cnt);
		}
	}
}
/* Periodic heartbeat: on failure hand off to the lost_work handler
 * (and deliberately stop re-arming the timer); otherwise re-arm for
 * the next period and refresh the channel-status watchdog.
 */
static void sss_heartbeat_timer_handler(struct timer_list *t)
{
	struct sss_hwdev *hwdev = from_timer(hwdev, t, heartbeat.heartbeat_timer);

	if (sss_is_chip_abnormal(hwdev)) {
		queue_work(hwdev->workq, &hwdev->heartbeat.lost_work);
	} else {
		mod_timer(&hwdev->heartbeat.heartbeat_timer,
			  jiffies + msecs_to_jiffies(SSS_HEARTBEAT_PERIOD));
	}

	sss_update_channel_status(hwdev);
}
/* Work item run when the heartbeat timer detects a dead chip: bump
 * statistics, notify the registered event handler with the specific
 * failure kind (PCIe link down vs. heartbeat lost), log it, and dump
 * the chip error state.
 * Note: the original also computed fault_level/pcie_src locals that
 * were never read; those dead stores are removed here.
 */
static void sss_heartbeat_lost_handler(struct work_struct *work)
{
	struct sss_event_info event_info = {0};
	struct sss_hwdev *hwdev = container_of(work, struct sss_hwdev,
					       heartbeat.lost_work);

	atomic_inc(&hwdev->hw_stats.heart_lost_stats);

	if (hwdev->event_handler) {
		event_info.type = SSS_GET_PCIE_LINK_STATUS(hwdev);
		event_info.service = SSS_EVENT_SRV_COMM;
		hwdev->event_handler(hwdev->event_handler_data, &event_info);
	}

	if (hwdev->heartbeat.pcie_link_down)
		sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n");
	else
		sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n",
			sss_get_global_func_id(hwdev));

	sss_dump_chip_err_info(hwdev);
}
/* Arm the periodic heartbeat timer.
 * Fix: initialize lost_work BEFORE arming the timer — the timer
 * handler may queue_work() it, and the original order left a window
 * in which an uninitialized work item could be queued.
 */
static void sss_create_heartbeat_timer(struct sss_hwdev *hwdev)
{
	INIT_WORK(&hwdev->heartbeat.lost_work, sss_heartbeat_lost_handler);

	timer_setup(&hwdev->heartbeat.heartbeat_timer, sss_heartbeat_timer_handler, 0);
	hwdev->heartbeat.heartbeat_timer.expires =
		jiffies + msecs_to_jiffies(SSS_HEARTBEAT_TIMER_EXPIRES);
	add_timer(&hwdev->heartbeat.heartbeat_timer);
}
/* Tear down the heartbeat machinery.
 * Fix: stop the timer first so its handler can no longer queue
 * lost_work, then destroy the work item — the original order could
 * let the still-live timer re-queue already-destroyed work.
 */
static void sss_destroy_heartbeat_timer(struct sss_hwdev *hwdev)
{
	del_timer_sync(&hwdev->heartbeat.heartbeat_timer);
	destroy_work(&hwdev->heartbeat.lost_work);
}
static void sss_set_bm_host_mode(struct sss_hwdev *hwdev)
{
struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap;
u8 host_id = SSS_GET_HWIF_PCI_INTF_ID(hwdev->hwif);
if (host_id == svc_cap->master_host_id)
SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_BM_MASTER);
else
SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_BM_SLAVE);
}
static void sss_set_vm_host_mode(struct sss_hwdev *hwdev)
{
struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap;
u8 host_id = SSS_GET_HWIF_PCI_INTF_ID(hwdev->hwif);
if (host_id == svc_cap->master_host_id)
SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_VM_MASTER);
else
SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_VM_SLAVE);
}
/* Single-host (normal) mode setup. */
static void sss_set_normal_host_mode(struct sss_hwdev *hwdev)
{
	SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_NORMAL_HOST);
}
/* On a multi-host PPF acting as a slave host, announce this host as
 * active to the chip. No-op (success) everywhere else.
 */
static int sss_enable_multi_host(struct sss_hwdev *hwdev)
{
	if (SSS_IS_PPF(hwdev) && SSS_IS_MULTI_HOST(hwdev) &&
	    SSS_IS_SLAVE_HOST(hwdev))
		sss_chip_set_slave_host_status(hwdev, sss_get_pcie_itf_id(hwdev), true);

	return 0;
}
/* On a multi-host PPF acting as a slave host, announce this host as
 * inactive to the chip. No-op (success) everywhere else.
 */
static int sss_disable_multi_host(struct sss_hwdev *hwdev)
{
	if (SSS_IS_PPF(hwdev) && SSS_IS_MULTI_HOST(hwdev) &&
	    SSS_IS_SLAVE_HOST(hwdev))
		sss_chip_set_slave_host_status(hwdev, sss_get_pcie_itf_id(hwdev), false);

	return 0;
}
/* Pick and apply the host mode from the service capability.
 * VFs and unknown capability values fall back to normal-host mode.
 * After the mode is set, multi-host slave status is announced to the
 * chip when applicable.
 */
static int sss_init_host_mode(struct sss_hwdev *hwdev)
{
	int ret;
	struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap;
	/* Indexed by enum sss_host_mode. */
	sss_set_mode_handler_t handler[SSS_HOST_MODE_MAX] = {
		sss_set_normal_host_mode,
		sss_set_vm_host_mode,
		sss_set_bm_host_mode
	};

	if (SSS_GET_FUNC_TYPE(hwdev) == SSS_FUNC_TYPE_VF) {
		SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_NORMAL_HOST);
		return 0;
	}

	/* Out-of-range capability value: default to normal host. */
	if (svc_cap->srv_multi_host_mode >= SSS_HOST_MODE_MAX) {
		SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_NORMAL_HOST);
		return 0;
	}

	handler[svc_cap->srv_multi_host_mode](hwdev);

	ret = sss_enable_multi_host(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init function mode\n");
		return ret;
	}

	return 0;
}
/* Reverse sss_init_host_mode(): withdraw multi-host slave status. */
static void sss_deinit_host_mode(struct sss_hwdev *hwdev)
{
	sss_disable_multi_host(hwdev);
}
/* Wall-clock time in milliseconds.
 * NOTE(review): do_gettimeofday() was removed from mainline kernels;
 * presumably the sss_kernel.h compat layer provides it on newer
 * kernels — keep this call for compatibility.
 */
static u64 sss_get_real_time(void)
{
	struct timeval val = {0};

	do_gettimeofday(&val);

	return (u64)val.tv_sec * MSEC_PER_SEC +
	       (u64)val.tv_usec / USEC_PER_MSEC;
}
/* Delayed work: push the host wall-clock time to the firmware, then
 * re-queue itself to run again after SSS_SYNFW_TIME_PERIOD.
 */
static void sss_auto_sync_time_work(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct sss_hwdev *hwdev = container_of(delay,
					       struct sss_hwdev, sync_time_task);
	int ret;

	ret = sss_chip_sync_time(hwdev, sss_get_real_time());
	if (ret != 0)
		sdk_err(hwdev->dev_hdl,
			"Fail to sync UTC time to firmware, errno:%d.\n", ret);

	/* Self re-arming: failure above does not stop future syncs. */
	queue_delayed_work(hwdev->workq, &hwdev->sync_time_task,
			   msecs_to_jiffies(SSS_SYNFW_TIME_PERIOD));
}
/* Delayed work: trigger a management-channel probe and re-queue
 * itself while no channel timeout has been flagged on the card.
 */
static void sss_auto_channel_detect_work(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct sss_hwdev *hwdev = container_of(delay,
					       struct sss_hwdev, channel_detect_task);
	struct sss_card_node *chip_node = NULL;

	sss_chip_comm_channel_detect(hwdev);

	/* Stop re-arming once a timeout has been recorded. */
	chip_node = hwdev->chip_node;
	if (!atomic_read(&chip_node->channel_timeout_cnt))
		queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task,
				   msecs_to_jiffies(SSS_CHANNEL_DETECT_PERIOD));
}
/* PPF-only: start the periodic firmware time-sync work, and the
 * channel-detect work when the feature is negotiated.
 */
static void sss_hwdev_init_work(struct sss_hwdev *hwdev)
{
	if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF)
		return;

	INIT_DELAYED_WORK(&hwdev->sync_time_task, sss_auto_sync_time_work);
	queue_delayed_work(hwdev->workq, &hwdev->sync_time_task,
			   msecs_to_jiffies(SSS_SYNFW_TIME_PERIOD));

	if (SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev)) {
		INIT_DELAYED_WORK(&hwdev->channel_detect_task,
				  sss_auto_channel_detect_work);
		queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task,
				   msecs_to_jiffies(SSS_CHANNEL_DETECT_PERIOD));
	}
}
/* PPF-only: cancel the periodic work started by sss_hwdev_init_work.
 * The channel-detect feature bit is cleared first so the work cannot
 * be considered supported while it is being cancelled.
 */
static void sss_hwdev_deinit_work(struct sss_hwdev *hwdev)
{
	if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF)
		return;

	if (SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev)) {
		hwdev->features[0] &= ~(SSS_COMM_F_CHANNEL_DETECT);
		cancel_delayed_work_sync(&hwdev->channel_detect_task);
	}

	cancel_delayed_work_sync(&hwdev->sync_time_task);
}
/* Allocate and fully initialize the hwdev for @adapter.
 * Init order: hwif -> workqueue -> heartbeat timer -> mgmt info ->
 * mgmt channel -> (devlink) -> capability -> host mode -> periodic
 * work -> feature negotiation. Unwound in reverse via the goto
 * ladder below; on failure adapter->hwdev is reset to NULL and
 * -ENOMEM/-EFAULT is returned.
 * The HAVE_DEVLINK_FLASH_UPDATE_PARAMS #ifdef pair is a kernel
 * compatibility guard and must be kept as-is.
 */
int sss_init_hwdev(struct sss_pci_adapter *adapter)
{
	struct sss_hwdev *hwdev;
	int ret;

	hwdev = sss_alloc_hwdev();
	if (!hwdev)
		return -ENOMEM;

	sss_init_hwdev_param(hwdev, adapter);
	adapter->hwdev = hwdev;

	ret = sss_hwif_init(adapter);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init hwif\n");
		goto init_hwif_err;
	}

	sss_set_chip_present_flag(hwdev, true);

	hwdev->workq = alloc_workqueue(SSS_HWDEV_WQ_NAME, WQ_MEM_RECLAIM, SSS_WQ_MAX_REQ);
	if (!hwdev->workq) {
		sdk_err(hwdev->dev_hdl, "Fail to alloc hardware workq\n");
		goto alloc_workq_err;
	}

	sss_create_heartbeat_timer(hwdev);

	ret = sss_init_mgmt_info(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init mgmt info\n");
		goto init_mgmt_info_err;
	}

	ret = sss_init_mgmt_channel(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init mgmt channel\n");
		goto init_mgmt_channel_err;
	}

#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS
	ret = sss_init_devlink(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init devlink\n");
		goto init_devlink_err;
	}
#endif

	ret = sss_init_capability(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init capability\n");
		goto init_cap_err;
	}

	ret = sss_init_host_mode(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init capability\n");
		goto init_multi_host_fail;
	}

	sss_hwdev_init_work(hwdev);

	/* Negotiate the driver's feature set with the firmware. */
	ret = sss_chip_do_nego_feature(hwdev, SSS_MGMT_MSG_SET_CMD,
				       hwdev->features, SSS_MAX_FEATURE_QWORD);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to set comm features\n");
		goto set_feature_err;
	}

	return 0;

set_feature_err:
	sss_hwdev_deinit_work(hwdev);

	sss_deinit_host_mode(hwdev);
init_multi_host_fail:
	sss_deinit_capability(hwdev);

init_cap_err:
#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS
	sss_deinit_devlink(hwdev);

init_devlink_err:
#endif
	sss_deinit_mgmt_channel(hwdev);

init_mgmt_channel_err:
	sss_deinit_mgmt_info(hwdev);

init_mgmt_info_err:
	sss_destroy_heartbeat_timer(hwdev);
	destroy_workqueue(hwdev->workq);

alloc_workq_err:
	sss_hwif_deinit(hwdev);

init_hwif_err:
	sss_free_hwdev(hwdev);
	adapter->hwdev = NULL;

	return -EFAULT;
}
/* Full teardown of a hwdev, strictly reversing sss_init_hwdev().
 * The all-zero feature negotiation tells the firmware the driver no
 * longer claims any features before resources are released.
 */
void sss_deinit_hwdev(void *hwdev)
{
	struct sss_hwdev *dev = hwdev;
	u64 drv_features[SSS_MAX_FEATURE_QWORD] = {0};

	sss_chip_do_nego_feature(hwdev, SSS_MGMT_MSG_SET_CMD,
				 drv_features, SSS_MAX_FEATURE_QWORD);

	sss_hwdev_deinit_work(dev);

	if (SSS_IS_MULTI_HOST(dev))
		sss_disable_multi_host(dev);

	/* Drain outstanding I/O before dismantling channels. */
	sss_hwdev_flush_io(dev, SSS_CHANNEL_COMM);

	sss_deinit_capability(dev);

#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS
	sss_deinit_devlink(dev);
#endif

	sss_deinit_mgmt_channel(dev);

	sss_deinit_mgmt_info(dev);
	sss_destroy_heartbeat_timer(hwdev);
	destroy_workqueue(dev->workq);

	sss_hwif_deinit(dev);
	sss_free_hwdev(dev);
}
/* Force-stop all communication with the chip: mark it absent and
 * complete every outstanding request so no caller blocks forever.
 */
void sss_hwdev_stop(void *hwdev)
{
	struct sss_hwdev *dev = hwdev;

	if (!dev)
		return;

	sss_set_chip_present_flag(dev, false);
	sdk_info(dev->dev_hdl, "Set card absent\n");

	sss_force_complete_all(dev);
	sdk_info(dev->dev_hdl, "All messages interacting with the chip will stop\n");
}
/* If the chip no longer responds, mark it absent and unblock all
 * waiters; a responsive chip is left untouched.
 */
void sss_hwdev_detach(void *hwdev)
{
	if (sss_chip_get_present_state((struct sss_hwdev *)hwdev))
		return;

	sss_set_chip_present_flag(hwdev, false);
	sss_force_complete_all(hwdev);
}
/* System-shutdown hook: withdraw this host's slave status on
 * multi-host cards so the chip stops treating it as active.
 */
void sss_hwdev_shutdown(void *hwdev)
{
	struct sss_hwdev *dev = hwdev;

	if (!dev)
		return;

	if (SSS_IS_SLAVE_HOST(dev))
		sss_chip_set_slave_host_status(dev, sss_get_pcie_itf_id(dev), false);
}

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

/* Hardware-device lifecycle entry points (create/teardown/stop). */
#ifndef SSS_HWDEV_INIT_H
#define SSS_HWDEV_INIT_H
#include "sss_adapter.h"
/* Allocate and bring up the hwdev bound to @adapter; 0 on success. */
int sss_init_hwdev(struct sss_pci_adapter *adapter);
/* Full teardown; reverses sss_init_hwdev(). */
void sss_deinit_hwdev(void *hwdev);
/* Mark the device absent if the chip no longer responds. */
void sss_hwdev_detach(void *hwdev);
/* Force-stop all chip communication (e.g. surprise removal). */
void sss_hwdev_stop(void *hwdev);
/* Shutdown hook: clears slave-host status on multi-host cards. */
void sss_hwdev_shutdown(void *hwdev);
#endif

View File

@ -0,0 +1,141 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_hwif_ctrlq_init.h"
#include "sss_hwif_api.h"
#include "sss_hwif_mbx.h"
#include "sss_common.h"
#define SSS_FLR_TIMEOUT 1000
#define SSS_FLR_TIMEOUT_ONCE 10000
/* Poll callback: returns OK once the PF reports the FLR-finish flag
 * (and switches it back to active), DOING otherwise.
 */
static enum sss_process_ret sss_check_flr_finish_handler(void *priv_data)
{
	struct sss_hwif *hwif = priv_data;

	if (sss_chip_get_pf_status(hwif) != SSS_PF_STATUS_FLR_FINISH_FLAG)
		return SSS_PROCESS_DOING;

	sss_chip_set_pf_status(hwif, SSS_PF_STATUS_ACTIVE_FLAG);
	return SSS_PROCESS_OK;
}
/* Poll for FLR completion (SSS_FLR_TIMEOUT ms, SSS_FLR_TIMEOUT_ONCE
 * us per poll).
 */
static int sss_wait_for_flr_finish(struct sss_hwif *hwif)
{
	return sss_check_handler_timeout(hwif, sss_check_flr_finish_handler,
					 SSS_FLR_TIMEOUT, SSS_FLR_TIMEOUT_ONCE);
}
/* Fire-and-forget message to the management CPU.
 * Fails with -EINVAL on a NULL hwdev and -EPERM when the chip is
 * marked absent.
 */
static int sss_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd,
				  void *buf_in, u16 in_size, u16 channel)
{
	if (!hwdev)
		return -EINVAL;

	if (!sss_get_dev_present_flag(hwdev))
		return -EPERM;

	return sss_send_mbx_to_mgmt_no_ack(hwdev, mod, cmd, buf_in,
					   in_size, channel);
}
/* Ask the firmware to flush this function's doorbell.
 * Failure is a warning only at the caller; when the message itself
 * succeeded (ret == 0) but the firmware reported a bad status or a
 * short reply, -EFAULT is returned instead of 0.
 */
static int sss_chip_flush_doorbell(struct sss_hwdev *hwdev, u16 channel)
{
	struct sss_hwif *hwif = hwdev->hwif;
	struct sss_cmd_clear_doorbell clear_db = {0};
	u16 out_len = sizeof(clear_db);
	int ret;

	clear_db.func_id = SSS_GET_HWIF_GLOBAL_ID(hwif);
	ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_FLUSH_DOORBELL,
				   &clear_db, sizeof(clear_db),
				   &clear_db, &out_len, channel);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &clear_db)) {
		sdk_warn(hwdev->dev_hdl,
			 "Fail to flush doorbell, ret: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n",
			 ret, clear_db.head.state, out_len, channel);
		/* Message delivered but firmware rejected it. */
		if (ret == 0)
			return -EFAULT;
	}

	return ret;
}
/* Notify the firmware (no-ack) that a resource flush is starting for
 * this function; failure is logged as a warning only.
 */
static int sss_chip_flush_resource(struct sss_hwdev *hwdev, u16 channel)
{
	struct sss_cmd_clear_resource clr_res = {0};
	int ret;

	clr_res.func_id = SSS_GET_HWIF_GLOBAL_ID(hwdev->hwif);

	ret = sss_msg_to_mgmt_no_ack(hwdev, SSS_MOD_TYPE_COMM,
				     SSS_COMM_MGMT_CMD_START_FLUSH, &clr_res,
				     sizeof(clr_res), channel);
	if (ret != 0)
		sdk_warn(hwdev->dev_hdl, "Fail to notice flush message, ret: %d, channel: 0x%x\n",
			 ret, channel);

	return ret;
}
/* Flush all in-flight I/O for this function.
 * Best-effort sequence: stop the control queue, disable + flush the
 * doorbell, start the firmware-side resource flush (PFs additionally
 * wait for FLR to complete), then re-enable the doorbell and reset
 * the control-queue context. Each step is attempted even if an
 * earlier one failed; the first error code encountered is kept and
 * returned. Returns 0 immediately when the chip is already absent.
 */
int sss_hwdev_flush_io(struct sss_hwdev *hwdev, u16 channel)
{
	struct sss_hwif *hwif = hwdev->hwif;
	int err;
	int ret = 0;

	if (hwdev->chip_present_flag == 0)
		return 0;

	/* Give a PF's in-flight traffic a moment to drain. */
	if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF)
		msleep(100);

	err = sss_wait_ctrlq_stop(hwdev);
	if (err != 0) {
		sdk_warn(hwdev->dev_hdl, "Fail to wait ctrlq stop\n");
		ret = err;
	}

	sss_chip_disable_doorbell(hwif);

	err = sss_chip_flush_doorbell(hwdev, channel);
	if (err != 0)
		ret = err;

	/* PFs signal FLR start via PF status; VFs just wait instead. */
	if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF)
		sss_chip_set_pf_status(hwif, SSS_PF_STATUS_FLR_START_FLAG);
	else
		msleep(100);

	err = sss_chip_flush_resource(hwdev, channel);
	if (err != 0)
		ret = err;

	if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF) {
		err = sss_wait_for_flr_finish(hwif);
		if (err != 0) {
			sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n");
			ret = err;
		}
	}

	sss_chip_enable_doorbell(hwif);

	err = sss_reinit_ctrlq_ctx(hwdev);
	if (err != 0) {
		sdk_warn(hwdev->dev_hdl, "Fail to reinit ctrlq ctx\n");
		ret = err;
	}

	return ret;
}

View File

@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

#ifndef SSS_HWDEV_IO_FLUSH_H
#define SSS_HWDEV_IO_FLUSH_H
#include "sss_hwdev.h"
/* Flush all in-flight I/O for @hwdev over management @channel. */
int sss_hwdev_flush_io(struct sss_hwdev *hwdev, u16 channel);
#endif

View File

@ -0,0 +1,725 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/netlink.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include "sss_hwdev_link.h"
#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS
#include "sss_hw_common.h"
#include "sss_hwdev_api.h"
#include "sss_hwif_adm.h"
#include "sss_hwif_adm_common.h"
#define SSS_FW_MAGIC_NUM 0x5a5a1100
#define SSS_FW_IMAGE_HEAD_SIZE 4096
#define SSS_FW_FRAGMENT_MAX_LEN 1536
#define SSS_FW_CFG_DEFAULT_INDEX 0xFF
#define SSS_FW_UPDATE_MGMT_TIMEOUT 3000000U
#define SSS_FW_TYPE_MAX_NUM 0x40
#define SSS_FW_CFG_MAX_INDEX 8
#define SSS_FW_CFG_MIN_INDEX 1
/* Driver-specific devlink parameter ids, allocated above the generic
 * devlink id range.
 */
enum sss_devlink_param_id {
	SSS_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	SSS_DEVLINK_PARAM_ID_ACTIVATE_FW,
	SSS_DEVLINK_PARAM_ID_SWITCH_CFG,
};
/* Section types found in a firmware image file, grouped into numeric
 * ranges (MIN/MAX markers bound each group): MPU code/data, NPU tile
 * and PPE sections, configuration slots, PHY/BIOS/hlink, boot and
 * hot-patch slots, option ROM.
 */
enum sss_firmware_type {
	SSS_UP_FW_UPDATE_MIN_TYPE1 = 0x0,
	SSS_UP_FW_UPDATE_UP_TEXT = 0x0,
	SSS_UP_FW_UPDATE_UP_DATA = 0x1,
	SSS_UP_FW_UPDATE_UP_DICT = 0x2,
	SSS_UP_FW_UPDATE_TILE_PCPTR = 0x3,
	SSS_UP_FW_UPDATE_TILE_TEXT = 0x4,
	SSS_UP_FW_UPDATE_TILE_DATA = 0x5,
	SSS_UP_FW_UPDATE_TILE_DICT = 0x6,
	SSS_UP_FW_UPDATE_PPE_STATE = 0x7,
	SSS_UP_FW_UPDATE_PPE_BRANCH = 0x8,
	SSS_UP_FW_UPDATE_PPE_EXTACT = 0x9,
	SSS_UP_FW_UPDATE_MAX_TYPE1 = 0x9,
	SSS_UP_FW_UPDATE_CFG0 = 0xa,
	SSS_UP_FW_UPDATE_CFG1 = 0xb,
	SSS_UP_FW_UPDATE_CFG2 = 0xc,
	SSS_UP_FW_UPDATE_CFG3 = 0xd,
	SSS_UP_FW_UPDATE_MAX_TYPE1_CFG = 0xd,
	SSS_UP_FW_UPDATE_MIN_TYPE2 = 0x14,
	SSS_UP_FW_UPDATE_MAX_TYPE2 = 0x14,
	SSS_UP_FW_UPDATE_MIN_TYPE3 = 0x18,
	SSS_UP_FW_UPDATE_PHY = 0x18,
	SSS_UP_FW_UPDATE_BIOS = 0x19,
	SSS_UP_FW_UPDATE_HLINK_ONE = 0x1a,
	SSS_UP_FW_UPDATE_HLINK_TWO = 0x1b,
	SSS_UP_FW_UPDATE_HLINK_THR = 0x1c,
	SSS_UP_FW_UPDATE_MAX_TYPE3 = 0x1c,
	SSS_UP_FW_UPDATE_MIN_TYPE4 = 0x20,
	SSS_UP_FW_UPDATE_L0FW = 0x20,
	SSS_UP_FW_UPDATE_L1FW = 0x21,
	SSS_UP_FW_UPDATE_BOOT = 0x22,
	SSS_UP_FW_UPDATE_SEC_DICT = 0x23,
	SSS_UP_FW_UPDATE_HOT_PATCH0 = 0x24,
	SSS_UP_FW_UPDATE_HOT_PATCH1 = 0x25,
	SSS_UP_FW_UPDATE_HOT_PATCH2 = 0x26,
	SSS_UP_FW_UPDATE_HOT_PATCH3 = 0x27,
	SSS_UP_FW_UPDATE_HOT_PATCH4 = 0x28,
	SSS_UP_FW_UPDATE_HOT_PATCH5 = 0x29,
	SSS_UP_FW_UPDATE_HOT_PATCH6 = 0x2a,
	SSS_UP_FW_UPDATE_HOT_PATCH7 = 0x2b,
	SSS_UP_FW_UPDATE_HOT_PATCH8 = 0x2c,
	SSS_UP_FW_UPDATE_HOT_PATCH9 = 0x2d,
	SSS_UP_FW_UPDATE_HOT_PATCH10 = 0x2e,
	SSS_UP_FW_UPDATE_HOT_PATCH11 = 0x2f,
	SSS_UP_FW_UPDATE_HOT_PATCH12 = 0x30,
	SSS_UP_FW_UPDATE_HOT_PATCH13 = 0x31,
	SSS_UP_FW_UPDATE_HOT_PATCH14 = 0x32,
	SSS_UP_FW_UPDATE_HOT_PATCH15 = 0x33,
	SSS_UP_FW_UPDATE_HOT_PATCH16 = 0x34,
	SSS_UP_FW_UPDATE_HOT_PATCH17 = 0x35,
	SSS_UP_FW_UPDATE_HOT_PATCH18 = 0x36,
	SSS_UP_FW_UPDATE_HOT_PATCH19 = 0x37,
	SSS_UP_FW_UPDATE_MAX_TYPE4 = 0x37,
	SSS_UP_FW_UPDATE_MIN_TYPE5 = 0x3a,
	SSS_UP_FW_UPDATE_OPTION_ROM = 0x3a,
	SSS_UP_FW_UPDATE_MAX_TYPE5 = 0x3a,
	SSS_UP_FW_UPDATE_MIN_TYPE6 = 0x3e,
	SSS_UP_FW_UPDATE_MAX_TYPE6 = 0x3e,
	SSS_UP_FW_UPDATE_MIN_TYPE7 = 0x40,
	SSS_UP_FW_UPDATE_MAX_TYPE7 = 0x40,
};
/* Section-type bitmasks used to validate image completeness: a cold
 * image must contain every MPU and NPU section plus at least one of
 * the CFG sections.
 */
#define SSS_IMAGE_MPU_ALL_IN (BIT_ULL(SSS_UP_FW_UPDATE_UP_TEXT) | \
	BIT_ULL(SSS_UP_FW_UPDATE_UP_DATA) | \
	BIT_ULL(SSS_UP_FW_UPDATE_UP_DICT))
#define SSS_IMAGE_NPU_ALL_IN (BIT_ULL(SSS_UP_FW_UPDATE_TILE_PCPTR) | \
	BIT_ULL(SSS_UP_FW_UPDATE_TILE_TEXT) | \
	BIT_ULL(SSS_UP_FW_UPDATE_TILE_DATA) | \
	BIT_ULL(SSS_UP_FW_UPDATE_TILE_DICT) | \
	BIT_ULL(SSS_UP_FW_UPDATE_PPE_STATE) | \
	BIT_ULL(SSS_UP_FW_UPDATE_PPE_BRANCH) | \
	BIT_ULL(SSS_UP_FW_UPDATE_PPE_EXTACT))
#define SSS_IMAGE_COLD_ALL_IN (SSS_IMAGE_MPU_ALL_IN | SSS_IMAGE_NPU_ALL_IN)
#define SSS_IMAGE_CFG_ALL_IN (BIT_ULL(SSS_UP_FW_UPDATE_CFG0) | \
	BIT_ULL(SSS_UP_FW_UPDATE_CFG1) | \
	BIT_ULL(SSS_UP_FW_UPDATE_CFG2) | \
	BIT_ULL(SSS_UP_FW_UPDATE_CFG3))
#define SSS_CHECK_IMAGE_INTEGRATY(mask) \
	(((mask) & SSS_IMAGE_COLD_ALL_IN) == SSS_IMAGE_COLD_ALL_IN && \
	((mask) & SSS_IMAGE_CFG_ALL_IN) != 0)

/* Recover the hwdev pointer from a devlink instance. */
#define SSS_LINK_HWDEV(link) \
	((struct sss_hwdev *)((struct sss_devlink *)devlink_priv(link))->hwdev)
/* Per-section header inside a firmware image file (on-disk layout). */
struct sss_firmware_section {
	u32 section_len;      /* payload length in bytes */
	u32 section_offset;   /* offset of the payload in the file */
	u32 section_version;
	u32 section_type;     /* enum sss_firmware_type value */
	u32 section_crc;
	u32 section_flag;     /* bit0: section is signed */
};
/* Firmware image file header (on-disk layout; fixed-size head of
 * SSS_FW_IMAGE_HEAD_SIZE bytes followed by the section payloads
 * starting at bin_data).
 */
struct sss_firmware_image {
	u32 fw_version;
	u32 fw_len;    /* total payload length, excluding this header */
	u32 fw_magic;  /* must be SSS_FW_MAGIC_NUM */
	struct {
		u32 section_cnt : 16;
		u32 rsvd : 16;
	} fw_info;
	struct sss_firmware_section section_info[SSS_FW_TYPE_MAX_NUM];
	u32 device_id; /* board type this image targets */
	u32 rsvd0[101];
	u32 rsvd1[534];
	u32 bin_data;  /* first word of the section payloads */
};
/* In-memory copy of the image metadata used while driving an update. */
struct sss_host_image {
	struct sss_firmware_section section_info[SSS_FW_TYPE_MAX_NUM];
	struct {
		u32 total_len;
		u32 fw_version;
	} image_info;
	u32 section_cnt;
	u32 device_id;
};
/* Management command carrying one firmware fragment to the MPU. */
struct sss_cmd_update_firmware {
	struct sss_mgmt_msg_head head;
	struct {
		u32 sl : 1;            /* last fragment of the section */
		u32 sf : 1;            /* first fragment of the section */
		u32 flag : 1;
		u32 bit_signed : 1;    /* section is signed */
		u32 reserved : 12;
		u32 fragment_len : 16; /* bytes valid in data[] */
	} ctl_info;
	struct {
		u32 section_crc;
		u32 section_type;
	} section_info;
	u32 total_len;
	u32 section_len;
	u32 section_version;
	u32 section_offset;
	u32 data[384];
};
/* Management command: activate the firmware stored in config slot
 * @index.
 */
struct sss_cmd_activate_firmware {
	struct sss_mgmt_msg_head head;
	u8 index; /* 0 ~ 7 */
	u8 data[7];
};

/* Management command: switch the active configuration to slot
 * @index.
 */
struct sss_cmd_switch_config {
	struct sss_mgmt_msg_head head;
	u8 index; /* 0 ~ 7 */
	u8 data[7];
};
/* Validate an untrusted firmware image file: magic number, section
 * count, and that the per-section lengths sum exactly to fw_len and
 * the file size.
 * Fix: the length accumulation and the fw_len + header-size check
 * were done in u32 and could wrap on crafted section lengths; both
 * are now computed in u64 so overflowing images are rejected.
 */
static bool sss_check_image_valid(struct sss_hwdev *hwdev,
				  struct sss_firmware_image *image, u32 image_size)
{
	u64 length = 0;
	u32 cnt;
	u32 i;

	if (image->fw_magic != SSS_FW_MAGIC_NUM) {
		sdk_err(hwdev->dev_hdl, "Err fw magic: 0x%x read from file\n", image->fw_magic);
		return false;
	}

	cnt = image->fw_info.section_cnt;
	if (cnt > SSS_FW_TYPE_MAX_NUM) {
		sdk_err(hwdev->dev_hdl, "Err fw type num: 0x%x read from file\n", cnt);
		return false;
	}

	/* u64 accumulator: section lengths come from the file and must
	 * not be allowed to wrap a u32 sum.
	 */
	for (i = 0; i < cnt; i++)
		length += image->section_info[i].section_len;

	if (length != image->fw_len ||
	    ((u64)image->fw_len + SSS_FW_IMAGE_HEAD_SIZE) != image_size) {
		sdk_err(hwdev->dev_hdl, "Err data size: 0x%x read from file\n",
			(u32)length);
		return false;
	}

	return true;
}
/*
 * Copy the section table and the summary fields from the raw image header
 * into the host-side metadata structure.
 */
static void sss_init_host_image(struct sss_host_image *host_image,
				struct sss_firmware_image *image)
{
	int id;

	for (id = 0; id < image->fw_info.section_cnt; id++)
		memcpy(&host_image->section_info[id], &image->section_info[id],
		       sizeof(image->section_info[id]));

	host_image->section_cnt = image->fw_info.section_cnt;
	host_image->device_id = image->device_id;
	host_image->image_info.fw_version = image->fw_version;
	host_image->image_info.total_len = image->fw_len;
}
/*
 * Ensure no section type appears twice and that the set of sections forms
 * a complete image (checked via SSS_CHECK_IMAGE_INTEGRATY on the type mask).
 */
static bool sss_check_image_integrity(struct sss_hwdev *hwdev,
				      struct sss_host_image *host_image)
{
	u64 type_mask = 0;
	u32 id;

	for (id = 0; id < host_image->section_cnt; id++) {
		u64 bit = 1ULL << host_image->section_info[id].section_type;

		if (type_mask & bit) {
			sdk_err(hwdev->dev_hdl, "Duplicate section type: %u\n",
				host_image->section_info[id].section_type);
			return false;
		}
		type_mask |= bit;
	}

	if (!SSS_CHECK_IMAGE_INTEGRATY(type_mask)) {
		sdk_err(hwdev->dev_hdl,
			"Fail to check file integrity, valid: 0x%llx, current: 0x%llx\n",
			(SSS_IMAGE_COLD_ALL_IN | SSS_IMAGE_CFG_ALL_IN), type_mask);
		return false;
	}

	return true;
}
/*
 * Check that the device type encoded in the image matches the board type
 * reported by the firmware.
 */
static bool sss_check_image_device_id(struct sss_hwdev *hwdev, u32 dev_id)
{
	struct sss_cmd_board_info board = {0};

	if (sss_chip_get_board_info(hwdev, &board.info) != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to get board info\n");
		return false;
	}

	if (dev_id != board.info.board_type) {
		sdk_err(hwdev->dev_hdl,
			"The image device type: 0x%x don't match the fw dev id: 0x%x\n",
			dev_id, board.info.board_type);
		return false;
	}

	return true;
}
/*
 * Fill one update-firmware command for the fragment starting at send_offset;
 * 'remain' is the number of section bytes not yet transferred.
 */
static void sss_init_update_cmd_param(struct sss_cmd_update_firmware *cmd_update,
				      struct sss_firmware_section *info, int remain,
				      u32 send_offset)
{
	/* sl: this fragment completes the section; sf: it is the first one */
	cmd_update->ctl_info.sl = (remain <= SSS_FW_FRAGMENT_MAX_LEN);
	cmd_update->ctl_info.sf = (remain == info->section_len);
	cmd_update->ctl_info.bit_signed = info->section_flag & 0x1;
	cmd_update->ctl_info.fragment_len = min(remain, SSS_FW_FRAGMENT_MAX_LEN);

	cmd_update->section_info.section_crc = info->section_crc;
	cmd_update->section_info.section_type = info->section_type;

	cmd_update->section_offset = send_offset;
	cmd_update->section_len = info->section_len;
	cmd_update->section_version = info->section_version;
}
/* Send one firmware fragment to the management CPU and check the reply. */
static int sss_chip_update_firmware(struct sss_hwdev *hwdev,
struct sss_cmd_update_firmware *cmd_update)
{
int ret;
u16 out_len = sizeof(*cmd_update);
ret = sss_sync_send_adm_msg(hwdev, SSS_MOD_TYPE_COMM,
SSS_COMM_MGMT_CMD_UPDATE_FW, cmd_update, sizeof(*cmd_update),
cmd_update, &out_len, SSS_FW_UPDATE_MGMT_TIMEOUT);
if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, cmd_update)) {
sdk_err(hwdev->dev_hdl,
"Fail to update fw, ret: %d, status: 0x%x, out_len: 0x%x\n",
ret, cmd_update->head.state, out_len);
/* propagate the firmware status code when it reports a failure */
return (cmd_update->head.state != 0) ?
cmd_update->head.state : -EIO;
}
return 0;
}
/*
 * Stream every section of the image to the device in fragments of at most
 * SSS_FW_FRAGMENT_MAX_LEN bytes.  The whole-image total_len is sent only in
 * the very first fragment; all subsequent fragments carry 0.
 */
static int sss_update_firmware(struct sss_hwdev *hwdev, const u8 *data,
struct sss_host_image *host_image)
{
int ret;
int remain;
u32 i;
u32 send_offset;
u32 offset;
bool flag = false;
struct sss_cmd_update_firmware *cmd_update = NULL;
cmd_update = kzalloc(sizeof(*cmd_update), GFP_KERNEL);
if (!cmd_update)
return -ENOMEM;
for (i = 0; i < host_image->section_cnt; i++) {
offset = host_image->section_info[i].section_offset;
remain = (int)(host_image->section_info[i].section_len);
send_offset = 0;
while (remain > 0) {
/* only the first fragment of the whole image reports total_len */
if (flag) {
cmd_update->total_len = 0;
} else {
cmd_update->total_len = host_image->image_info.total_len;
flag = true;
}
sss_init_update_cmd_param(cmd_update, &host_image->section_info[i],
remain, send_offset);
/* payload lives after the image header, at section offset + progress */
memcpy(cmd_update->data,
((data + SSS_FW_IMAGE_HEAD_SIZE) + offset) + send_offset,
cmd_update->ctl_info.fragment_len);
ret = sss_chip_update_firmware(hwdev, cmd_update);
if (ret != 0) {
kfree(cmd_update);
return ret;
}
send_offset += cmd_update->ctl_info.fragment_len;
remain = (int)(host_image->section_info[i].section_len - send_offset);
}
}
kfree(cmd_update);
return 0;
}
/*
 * Run the actual flash and emit devlink status notifications around it.
 * The begin/end notify pair only exists (and is only needed) on older
 * kernels, hence the compatibility #ifdefs.
 */
static int sss_flash_update_notify(struct devlink *devlink,
const struct firmware *fw, struct sss_host_image *image,
struct netlink_ext_ack *extack)
{
struct sss_devlink *devlink_dev = devlink_priv(devlink);
struct sss_hwdev *hwdev = devlink_dev->hwdev;
int ret;
#if defined(HAVE_DEVLINK_FW_FILE_NAME_MEMBER) || defined(DEVLINK_FLASH_UPDATE_NEED_TO_NOTIFY)
devlink_flash_update_begin_notify(devlink);
#endif
devlink_flash_update_status_notify(devlink, "Flash firmware begin", NULL, 0, 0);
sdk_info(hwdev->dev_hdl, "Flash firmware begin\n");
ret = sss_update_firmware(hwdev, fw->data, image);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to flash firmware, ret: %d\n", ret);
NL_SET_ERR_MSG_MOD(extack, "Fail to flash firmware");
devlink_flash_update_status_notify(devlink, "Fail to flash firmware", NULL, 0, 0);
} else {
sdk_info(hwdev->dev_hdl, "Flash firmware end\n");
devlink_flash_update_status_notify(devlink, "Flash firmware end", NULL, 0, 0);
}
#if defined(HAVE_DEVLINK_FW_FILE_NAME_MEMBER) || defined(DEVLINK_FLASH_UPDATE_NEED_TO_NOTIFY)
devlink_flash_update_end_notify(devlink);
#endif
return ret;
}
/*
 * devlink .flash_update callback.  Depending on kernel version the firmware
 * is requested here by file name or already supplied in params, hence the
 * compatibility #ifdef ladder around the signature and request path.
 */
#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM
static int sss_devlink_flash_update(struct devlink *link, const char *file_name,
const char *component, struct netlink_ext_ack *extack)
#else
static int sss_devlink_flash_update(struct devlink *link,
struct devlink_flash_update_params *param,
struct netlink_ext_ack *extack)
#endif
{
int ret;
struct sss_host_image *host_image = NULL;
struct sss_devlink *link_dev = devlink_priv(link);
struct sss_hwdev *hwdev = link_dev->hwdev;
#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER
const struct firmware *fw = NULL;
#else
const struct firmware *fw = param->fw;
#endif
host_image = kzalloc(sizeof(*host_image), GFP_KERNEL);
if (!host_image) {
ret = -ENOMEM;
goto alloc_host_image_err;
}
#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM
ret = request_firmware_direct(&fw, file_name, hwdev->dev_hdl);
#else
#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER
ret = request_firmware_direct(&fw, param->file_name, hwdev->dev_hdl);
#else
/* fw was provided by the devlink core in param->fw; nothing to request */
ret = 0;
#endif
#endif
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to request firmware\n");
goto request_fw_err;
}
/* validate header, build host metadata, check integrity and device type */
if (!sss_check_image_valid(hwdev, (struct sss_firmware_image *)fw->data,
(u32)(fw->size))) {
sdk_err(hwdev->dev_hdl, "Fail to check image valid\n");
NL_SET_ERR_MSG_MOD(extack, "Fail to check image valid");
ret = -EINVAL;
goto check_image_err;
}
sss_init_host_image(host_image, (struct sss_firmware_image *)fw->data);
if (!sss_check_image_integrity(hwdev, host_image)) {
sdk_err(hwdev->dev_hdl, "Fail to check image integrity\n");
NL_SET_ERR_MSG_MOD(extack, "Fail to check image integrity");
ret = -EINVAL;
goto check_image_err;
}
if (!sss_check_image_device_id(hwdev, host_image->device_id)) {
sdk_err(hwdev->dev_hdl, "Fail to check image device id\n");
NL_SET_ERR_MSG_MOD(extack, "Fail to check image device id");
ret = -EINVAL;
goto check_image_err;
}
ret = sss_flash_update_notify(link, fw, host_image, extack);
check_image_err:
/* NOTE(review): with HAVE_DEVLINK_FW_FILE_NAME_MEMBER set but
 * HAVE_DEVLINK_FW_FILE_NAME_PARAM unset, the firmware requested above is
 * never released here — looks like a leak; confirm against the compat
 * matrix before changing (#ifdefs kept intentionally).
 */
#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM
release_firmware(fw);
#endif
request_fw_err:
kfree(host_image);
alloc_host_image_err:
/* reset cached cfg indexes after any flash attempt */
link_dev->switch_cfg_id = SSS_FW_CFG_DEFAULT_INDEX;
link_dev->active_cfg_id = SSS_FW_CFG_DEFAULT_INDEX;
return ret;
}
/* devlink operations: firmware flash update only. */
static const struct devlink_ops g_devlink_ops = {
#ifdef DEVLINK_HAVE_SUPPORTED_FLASH_UPDATE_PARAMS
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT,
#endif
.flash_update = sss_devlink_flash_update,
};
/*
 * Ask the management CPU to activate stored firmware configuration
 * cfg_num (0..7).  Restricted to PF/PPF and requires the ADM channel.
 */
static int sss_chip_activate_firmware(struct sss_hwdev *hwdev, u8 cfg_num)
{
int ret;
struct sss_cmd_activate_firmware cmd_activate = {0};
u16 out_len = sizeof(cmd_activate);
if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PF &&
SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF)
return -EOPNOTSUPP;
if (!SSS_SUPPORT_ADM_MSG(hwdev))
return -EPERM;
cmd_activate.index = cfg_num;
ret = sss_sync_send_adm_msg(hwdev, SSS_MOD_TYPE_COMM, SSS_COMM_MGMT_CMD_ACTIVE_FW,
&cmd_activate, sizeof(cmd_activate), &cmd_activate,
&out_len, SSS_FW_UPDATE_MGMT_TIMEOUT);
if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_activate)) {
sdk_err(hwdev->dev_hdl,
"Fail to activate firmware, ret: %d, status: 0x%x, out_len: 0x%x\n",
ret, cmd_activate.head.state, out_len);
/* propagate the firmware status code when it reports a failure */
return (cmd_activate.head.state != 0) ?
cmd_activate.head.state : -EIO;
}
return 0;
}
/* devlink param get: report the cached "activate_fw" configuration index. */
static int sss_devlink_get_activate_fw_config(struct devlink *link, u32 id,
					      struct devlink_param_gset_ctx *param_ctx)
{
	struct sss_devlink *devlink_dev = devlink_priv(link);

	param_ctx->val.vu8 = devlink_dev->active_cfg_id;

	return 0;
}
/*
 * devlink param set: cache the index and ask the chip to activate that
 * firmware configuration (user range 1..8 maps to chip index 0..7).
 */
static int sss_devlink_set_activate_fw_config(struct devlink *link, u32 id,
					      struct devlink_param_gset_ctx *param_ctx)
{
	struct sss_devlink *devlink_dev = devlink_priv(link);
	struct sss_hwdev *hwdev = devlink_dev->hwdev;
	int ret;

	devlink_dev->active_cfg_id = param_ctx->val.vu8;
	sdk_info(hwdev->dev_hdl, "Begin activate firmware\n");

	ret = sss_chip_activate_firmware(hwdev, devlink_dev->active_cfg_id - 1);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to activate firmware, ret: %d\n", ret);
		return ret;
	}

	sdk_info(hwdev->dev_hdl, "End activate firmware\n");
	return 0;
}
/*
 * Ask the management CPU to switch to stored configuration cfg_num (0..7).
 * Restricted to PF and requires the ADM channel.
 */
static int sss_chip_switch_config(struct sss_hwdev *hwdev, u8 cfg_num)
{
int ret;
struct sss_cmd_switch_config cmd_switch = {0};
u16 out_len = sizeof(cmd_switch);
if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PF)
return -EOPNOTSUPP;
if (!SSS_SUPPORT_ADM_MSG(hwdev))
return -EPERM;
cmd_switch.index = cfg_num;
ret = sss_sync_send_adm_msg(hwdev, SSS_MOD_TYPE_COMM, SSS_COMM_MGMT_CMD_SWITCH_CFG,
&cmd_switch, sizeof(cmd_switch), &cmd_switch,
&out_len, SSS_FW_UPDATE_MGMT_TIMEOUT);
if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_switch)) {
sdk_err(hwdev->dev_hdl,
"Fail to switch cfg, ret: %d, status: 0x%x, out_len: 0x%x\n",
ret, cmd_switch.head.state, out_len);
/* propagate the firmware status code when it reports a failure */
return (cmd_switch.head.state != 0) ?
cmd_switch.head.state : -EIO;
}
return 0;
}
/* devlink param get: report the cached "switch_cfg" configuration index. */
static int sss_devlink_get_switch_config(struct devlink *link, u32 id,
					 struct devlink_param_gset_ctx *param_ctx)
{
	struct sss_devlink *devlink_dev = devlink_priv(link);

	param_ctx->val.vu8 = devlink_dev->switch_cfg_id;

	return 0;
}
/*
 * devlink param set: cache the index and switch the chip to that stored
 * configuration (user range 1..8 maps to chip index 0..7).
 */
static int sss_devlink_set_switch_config(struct devlink *link, u32 id,
					 struct devlink_param_gset_ctx *param_ctx)
{
	struct sss_devlink *devlink_dev = devlink_priv(link);
	struct sss_hwdev *hwdev = devlink_dev->hwdev;
	int ret;

	devlink_dev->switch_cfg_id = param_ctx->val.vu8;
	sdk_info(hwdev->dev_hdl, "Begin switch cfg");

	ret = sss_chip_switch_config(hwdev, devlink_dev->switch_cfg_id - 1);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to switch cfg, ret: %d\n", ret);
		return ret;
	}

	sdk_info(hwdev->dev_hdl, "End Switch cfg\n");
	return 0;
}
/* Shared devlink validator: a firmware cfg index must lie in [1, 8]. */
static int sss_devlink_validate_firmware_config(struct devlink *link, u32 id,
						union devlink_param_value param_val,
						struct netlink_ext_ack *ext_ack)
{
	struct sss_hwdev *hwdev = SSS_LINK_HWDEV(link);

	if (param_val.vu8 >= SSS_FW_CFG_MIN_INDEX &&
	    param_val.vu8 <= SSS_FW_CFG_MAX_INDEX)
		return 0;

	sdk_err(hwdev->dev_hdl, "Firmware cfg id out of range [1,8]\n");
	NL_SET_ERR_MSG_MOD(ext_ack, "Firmware cfg id out of range [1,8]\n");
	return -ERANGE;
}
/* Permanent devlink driver parameters: activate_fw / switch_cfg, both u8
 * validated to [1, 8] by sss_devlink_validate_firmware_config().
 */
static const struct devlink_param g_devlink_param[] = {
DEVLINK_PARAM_DRIVER(SSS_DEVLINK_PARAM_ID_ACTIVATE_FW,
"activate_fw", DEVLINK_PARAM_TYPE_U8,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
sss_devlink_get_activate_fw_config,
sss_devlink_set_activate_fw_config,
sss_devlink_validate_firmware_config),
DEVLINK_PARAM_DRIVER(SSS_DEVLINK_PARAM_ID_SWITCH_CFG,
"switch_cfg", DEVLINK_PARAM_TYPE_U8,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
sss_devlink_get_switch_config,
sss_devlink_set_switch_config,
sss_devlink_validate_firmware_config),
};
/*
 * Allocate and register the devlink instance and its driver parameters.
 * The #ifdef ladder tracks devlink API changes across kernel versions
 * (alloc signature, register signature/return type, and whether params
 * must be registered before or after devlink_register()).
 */
int sss_init_devlink(struct sss_hwdev *hwdev)
{
int ret;
struct devlink *link = NULL;
struct pci_dev *pdev = hwdev->pcidev_hdl;
#ifdef HAS_DEVLINK_ALLOC_SETS_DEV
link = devlink_alloc(&g_devlink_ops, sizeof(struct sss_devlink), &pdev->dev);
#else
link = devlink_alloc(&g_devlink_ops, sizeof(struct sss_devlink));
#endif
if (!link) {
sdk_err(hwdev->dev_hdl, "Fail to alloc devlink\n");
return -ENOMEM;
}
hwdev->devlink_dev = devlink_priv(link);
hwdev->devlink_dev->hwdev = hwdev;
hwdev->devlink_dev->switch_cfg_id = SSS_FW_CFG_DEFAULT_INDEX;
hwdev->devlink_dev->active_cfg_id = SSS_FW_CFG_DEFAULT_INDEX;
#ifdef REGISTER_DEVLINK_PARAMETER_PREFERRED
ret = devlink_params_register(link, g_devlink_param,
ARRAY_SIZE(g_devlink_param));
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to register devlink param\n");
goto register_err;
}
#endif
#ifdef NO_DEVLINK_REGISTER_SETS_DEV
#ifdef DEVLINK_REGISTER_RETURN_VOID
devlink_register(link);
ret = 0;
#else
ret = devlink_register(link);
#endif
#else
ret = devlink_register(link, &pdev->dev);
#endif
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to register devlink\n");
#ifdef REGISTER_DEVLINK_PARAMETER_PREFERRED
devlink_params_unregister(link, g_devlink_param,
ARRAY_SIZE(g_devlink_param));
#endif
goto register_err;
}
#ifndef REGISTER_DEVLINK_PARAMETER_PREFERRED
ret = devlink_params_register(link, g_devlink_param,
ARRAY_SIZE(g_devlink_param));
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to register devlink param\n");
goto register_param_err;
}
#endif
devlink_params_publish(link);
return 0;
#ifndef REGISTER_DEVLINK_PARAMETER_PREFERRED
register_param_err:
devlink_unregister(link);
#endif
register_err:
devlink_free(link);
/* NOTE(review): the specific error code in 'ret' is discarded here in
 * favor of -EFAULT; returning 'ret' would preserve it — confirm callers.
 */
return -EFAULT;
}
/* Tear down the devlink instance in reverse order of sss_init_devlink(). */
void sss_deinit_devlink(struct sss_hwdev *hwdev)
{
struct devlink *link = priv_to_devlink(hwdev->devlink_dev);
devlink_params_unpublish(link);
devlink_params_unregister(link, g_devlink_param,
ARRAY_SIZE(g_devlink_param));
devlink_unregister(link);
devlink_free(link);
}
#endif

View File

@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
/* Devlink registration entry points for the hwdev layer. */
#ifndef SSS_HWDEV_LINK_H
#define SSS_HWDEV_LINK_H
#include "sss_kernel.h"
#include "sss_hwdev.h"
#include "sss_hw_mbx_msg.h"
/* Allocate/register and unregister/free the per-device devlink instance. */
int sss_init_devlink(struct sss_hwdev *hwdev);
void sss_deinit_devlink(struct sss_hwdev *hwdev);
#endif

View File

@ -0,0 +1,770 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/types.h>
#include <linux/spinlock.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_hwdev_api.h"
#include "sss_hwdev_mgmt_channel.h"
#include "sss_hwif_mbx.h"
#include "sss_hwif_mbx_init.h"
#include "sss_hwif_aeq.h"
#include "sss_hwif_export.h"
#include "sss_hwif_api.h"
#include "sss_hwif_adm_init.h"
#include "sss_hwif_mgmt_init.h"
#include "sss_hwif_ctrlq_init.h"
#include "sss_csr.h"
/* Communication features the driver supports by default (negotiated
 * against the firmware in sss_chip_get_feature()).
 */
#define SSS_DRV_FEATURE_DEF \
(SSS_COMM_F_ADM | SSS_COMM_F_CLP | SSS_COMM_F_MBX_SEGMENT | \
SSS_COMM_F_CTRLQ_NUM | SSS_COMM_F_VIRTIO_VQ_SIZE)
#define SSS_COMM_SUPPORT_CLP(hwdev) \
((hwdev)->features[0] & SSS_COMM_F_CLP)
/* DMA attribute indirection-index register field layout */
#define SSS_DMA_ATTR_INDIR_ID_SHIFT 0
#define SSS_DMA_ATTR_INDIR_ID_MASK 0x3FF
#define SSS_SET_DMA_ATTR_INDIR_ID(val, member) \
(((u32)(val) & SSS_DMA_ATTR_INDIR_##member##_MASK) << \
SSS_DMA_ATTR_INDIR_##member##_SHIFT)
#define SSS_CLEAR_DMA_ATTR_INDIR_ID(val, member) \
((val) & (~(SSS_DMA_ATTR_INDIR_##member##_MASK \
<< SSS_DMA_ATTR_INDIR_##member##_SHIFT)))
/* DMA attribute table entry field layout (ST/AT/PH/snoop/TPH) */
#define SSS_DMA_ATTR_ENTRY_ST_SHIFT 0
#define SSS_DMA_ATTR_ENTRY_AT_SHIFT 8
#define SSS_DMA_ATTR_ENTRY_PH_SHIFT 10
#define SSS_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12
#define SSS_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13
#define SSS_DMA_ATTR_ENTRY_ST_MASK 0xFF
#define SSS_DMA_ATTR_ENTRY_AT_MASK 0x3
#define SSS_DMA_ATTR_ENTRY_PH_MASK 0x3
#define SSS_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1
#define SSS_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1
#define SSS_SET_DMA_ATTR_ENTRY(val, member) \
(((u32)(val) & SSS_DMA_ATTR_ENTRY_##member##_MASK) << \
SSS_DMA_ATTR_ENTRY_##member##_SHIFT)
/* Default PCIe DMA attribute values used by sss_chip_init_dma_attr() */
#define SSS_PCIE_ST_DISABLE 0
#define SSS_PCIE_AT_DISABLE 0
#define SSS_PCIE_PH_DISABLE 0
#define SSS_PCIE_MSIX_ATTR_ENTRY 0
#define SSS_PCIE_SNOOP 0
#define SSS_PCIE_NO_SNOOP 1
#define SSS_PCIE_TPH_DISABLE 0
#define SSS_PCIE_TPH_ENABLE 1
/* Strings used by the fault-event printers below */
#define SSS_FAULT_LEVEL_STR_FATAL "fatal"
#define SSS_FAULT_LEVEL_STR_RESET "reset"
#define SSS_FAULT_LEVEL_STR_HOST "host"
#define SSS_FAULT_LEVEL_STR_FLR "flr"
#define SSS_FAULT_LEVEL_STR_GENERAL "general"
#define SSS_FAULT_LEVEL_STR_SUGGESTION "suggestion"
#define SSS_FAULT_LEVEL_STR_UNKNOWN "Unknown"
#define SSS_FAULT_TYPE_STR_CHIP "chip"
#define SSS_FAULT_TYPE_STR_NPU "ucode"
#define SSS_FAULT_TYPE_STR_MEM_RD "mem rd timeout"
#define SSS_FAULT_TYPE_STR_MEM_WR "mem wr timeout"
#define SSS_FAULT_TYPE_STR_REG_RD "reg rd timeout"
#define SSS_FAULT_TYPE_STR_REG_WR "reg wr timeout"
#define SSS_FAULT_TYPE_STR_PHY "phy fault"
#define SSS_FAULT_TYPE_STR_TSENSOR "tsensor fault"
#define SSS_FAULT_TYPE_STR_UNKNOWN "Unknown"
/* Reset types requested from the firmware as a bitmask */
#define SSS_COMM_RESET_TYPE \
((1 << SSS_RESET_TYPE_COMM) | (1 << SSS_RESET_TYPE_COMM_CMD_CH) | \
(1 << SSS_RESET_TYPE_FLUSH_BIT) | (1 << SSS_RESET_TYPE_MQM) | \
(1 << SSS_RESET_TYPE_SMF) | (1 << SSS_RESET_TYPE_PF_BW_CFG))
/* Dump formatting constants */
#define SSS_FOUR_REG_LEN 16
#define SSS_X_CSR_INDEX 30
#define SSS_DUMP_16B_PER_LINE 16
#define SSS_DUMP_4_VAR_PER_LINE 4
/* Printer for one fault-event type (indexed by the event's type field). */
typedef void (*sss_print_err_handler_t)(struct sss_hwdev *hwdev,
struct sss_fault_event *fault_event);
/* Handler for one management event (see g_mgmt_event_handler). */
typedef void (*sss_mgmt_event_handler_t)(void *data, void *in_buf, u16 in_size,
void *out_buf, u16 *out_size);
/* event_type -> handler mapping entry */
struct sss_mgmt_event {
u16 event_type;
sss_mgmt_event_handler_t handler;
};
/* forward declaration: defined after the fault print helpers below */
static void sss_fault_event_handler(void *data, void *in_buf, u16 in_size,
void *out_buf, u16 *out_size);
/*
 * Dump the management-CPU state from a watchdog report: time stamps, stack
 * usage, exception registers and x30..x0 (iterated from &x30 downwards).
 */
static void sss_show_watchdog_mgmt_register_info(struct sss_hwdev *hwdev,
struct sss_watchdog_info *watchdog_info)
{
u32 i;
u64 *reg = NULL;
sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%llx\n",
watchdog_info->cur_time_h, watchdog_info->cur_time_l,
watchdog_info->task_id, watchdog_info->sp);
sdk_err(hwdev->dev_hdl,
"Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%llx, bottom: 0x%llx\n",
watchdog_info->cur_used, watchdog_info->peak_used,
watchdog_info->is_overflow, watchdog_info->stack_top, watchdog_info->stack_bottom);
sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%llx, elr: 0x%llx, spsr: 0x%llx, far: 0x%llx, esr: 0x%llx, xzr: 0x%llx\n",
watchdog_info->pc, watchdog_info->elr, watchdog_info->spsr, watchdog_info->far,
watchdog_info->esr, watchdog_info->xzr);
sdk_err(hwdev->dev_hdl, "Mgmt register info\n");
/* registers are laid out from x30 down to x0 starting at &x30 */
reg = &watchdog_info->x30;
for (i = 0; i <= SSS_X_CSR_INDEX; i++)
sdk_err(hwdev->dev_hdl, "x%02u:0x%llx\n",
SSS_X_CSR_INDEX - i, reg[i]);
}
/*
 * Dump the reported stack, 16 bytes per line; a trailing partial line is
 * printed 4 bytes at a time.  The reported length is clamped to
 * SSS_STACK_DATA_LEN to stay inside stack_data[].
 */
static void sss_show_watchdog_stack_info(struct sss_hwdev *hwdev,
struct sss_watchdog_info *watchdog_info)
{
u32 i;
u32 j;
u32 tmp;
u32 stack_len;
u32 *dump_addr = NULL;
if (watchdog_info->stack_actlen <= SSS_STACK_DATA_LEN) {
stack_len = watchdog_info->stack_actlen;
} else {
sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n",
watchdog_info->stack_actlen);
stack_len = SSS_STACK_DATA_LEN;
}
sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16 bytes per line(start from sp)\n");
for (i = 0; i < (stack_len / SSS_DUMP_16B_PER_LINE); i++) {
dump_addr = (u32 *)(watchdog_info->stack_data + (u32)(i * SSS_DUMP_16B_PER_LINE));
sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n",
*dump_addr, *(dump_addr + 0x1), *(dump_addr + 0x2), *(dump_addr + 0x3));
}
/* leftover words (i keeps the value from the loop above) */
tmp = (stack_len % SSS_DUMP_16B_PER_LINE) / SSS_DUMP_4_VAR_PER_LINE;
for (j = 0; j < tmp; j++) {
dump_addr = (u32 *)(watchdog_info->stack_data +
(u32)(i * SSS_DUMP_16B_PER_LINE + j * SSS_DUMP_4_VAR_PER_LINE));
sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr);
}
}
/*
 * Validate and dump a management-CPU watchdog timeout report, then ack it
 * by clearing the state field in the reply buffer.
 */
static void sss_show_watchdog_timeout_info(struct sss_hwdev *hwdev,
					   void *buf_in, u16 in_size, void *buf_out, u16 *out_size)
{
	struct sss_watchdog_info *watchdog_info = buf_in;

	if (in_size != sizeof(*watchdog_info)) {
		/* %zu: sizeof yields size_t; %ld was wrong on 32-bit targets */
		sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %d, should be %zu\n",
			in_size, sizeof(*watchdog_info));
		return;
	}

	sss_show_watchdog_mgmt_register_info(hwdev, watchdog_info);
	sss_show_watchdog_stack_info(hwdev, watchdog_info);
	*out_size = sizeof(*watchdog_info);

	/* ack: report success in the output buffer */
	watchdog_info = buf_out;
	watchdog_info->head.state = 0;
}
/*
 * Management event: watchdog timeout.  Print the report and forward a
 * SSS_EVENT_MGMT_WATCHDOG event to the registered handler, if any.
 */
static void sss_watchdog_timeout_event_handler(void *hwdev,
void *buf_in, u16 in_size,
void *buf_out, u16 *out_size)
{
struct sss_event_info event_info = {0};
struct sss_hwdev *dev = hwdev;
sss_show_watchdog_timeout_info(dev, buf_in, in_size, buf_out, out_size);
if (dev->event_handler) {
/* NOTE(review): event_info.service is left zero here, unlike the
 * fault path which sets SSS_EVENT_SRV_COMM — confirm intentional. */
event_info.type = SSS_EVENT_MGMT_WATCHDOG;
dev->event_handler(dev->event_handler_data, &event_info);
}
}
/*
 * Print the firmware exception context: identity/thread fields first,
 * then the system registers, then xregs in pairs.
 */
static void sss_show_exc_info(struct sss_hwdev *hwdev, struct sss_exc_info *exc_info)
{
u32 i;
/* key information */
sdk_err(hwdev->dev_hdl, "==================== Exception Info Begin ====================\n");
sdk_err(hwdev->dev_hdl, "Exception CpuTick : 0x%08x 0x%08x\n",
exc_info->cpu_tick.tick_cnt_h, exc_info->cpu_tick.tick_cnt_l);
sdk_err(hwdev->dev_hdl, "Exception Cause : %u\n", exc_info->exc_cause);
sdk_err(hwdev->dev_hdl, "Os Version : %s\n", exc_info->os_ver);
sdk_err(hwdev->dev_hdl, "App Version : %s\n", exc_info->app_ver);
sdk_err(hwdev->dev_hdl, "CPU Type : 0x%08x\n", exc_info->cpu_type);
sdk_err(hwdev->dev_hdl, "CPU ID : 0x%08x\n", exc_info->cpu_id);
sdk_err(hwdev->dev_hdl, "Thread Type : 0x%08x\n", exc_info->thread_type);
sdk_err(hwdev->dev_hdl, "Thread ID : 0x%08x\n", exc_info->thread_id);
sdk_err(hwdev->dev_hdl, "Byte Order : 0x%08x\n", exc_info->byte_order);
sdk_err(hwdev->dev_hdl, "Nest Count : 0x%08x\n", exc_info->nest_cnt);
sdk_err(hwdev->dev_hdl, "Fatal Error Num : 0x%08x\n", exc_info->fatal_errno);
sdk_err(hwdev->dev_hdl, "Current SP : 0x%016llx\n", exc_info->uw_sp);
sdk_err(hwdev->dev_hdl, "Stack Bottom : 0x%016llx\n", exc_info->stack_bottom);
/* register field */
sdk_err(hwdev->dev_hdl, "Register contents when exception occur.\n");
sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "TTBR0",
exc_info->reg_info.ttbr0, "TTBR1", exc_info->reg_info.ttbr1);
sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "TCR",
exc_info->reg_info.tcr, "MAIR", exc_info->reg_info.mair);
sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "SCTLR",
exc_info->reg_info.sctlr, "VBAR", exc_info->reg_info.vbar);
sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "CURRENTE1",
exc_info->reg_info.current_el, "SP", exc_info->reg_info.sp);
sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "ELR",
exc_info->reg_info.elr, "SPSR", exc_info->reg_info.spsr);
sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "FAR",
exc_info->reg_info.far_r, "ESR", exc_info->reg_info.esr);
sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx\n", "XZR", exc_info->reg_info.xzr);
/* xregs printed two per line; the last (odd) one separately below */
for (i = 0; i < SSS_XREGS_NUM - 1; i += 0x2)
sdk_err(hwdev->dev_hdl, "XREGS[%02u]%-5s: 0x%016llx \t XREGS[%02u]%-5s: 0x%016llx",
i, " ", exc_info->reg_info.xregs[i],
(u32)(i + 0x1U), " ", exc_info->reg_info.xregs[(u32)(i + 0x1U)]);
sdk_err(hwdev->dev_hdl, "XREGS[%02u]%-5s: 0x%016llx \t ", SSS_XREGS_NUM - 1, " ",
exc_info->reg_info.xregs[SSS_XREGS_NUM - 1]);
}
/*
 * Management event: firmware "last word" (exception) report.  Print the
 * exception context, then dump the captured stack 16 bytes per line.
 */
static void sss_lastword_report_event_handler(void *hwdev,
					      void *buf_in, u16 in_size,
					      void *buf_out, u16 *out_size)
{
	struct sss_lastword_info *lastword_info = buf_in;
	struct sss_exc_info *exc_info = &lastword_info->stack_info;
	u32 stack_len = lastword_info->stack_actlen;
	struct sss_hwdev *dev = hwdev;
	u32 *curr_reg = NULL;
	u32 reg_i;
	u32 cnt;

	if (in_size != sizeof(*lastword_info)) {
		/* %zu: sizeof yields size_t; %ld was wrong on 32-bit targets */
		sdk_err(dev->dev_hdl, "Invalid mgmt lastword, length: %u, should be %zu\n",
			in_size, sizeof(*lastword_info));
		return;
	}

	sss_show_exc_info(dev, exc_info);

	/* call stack dump (typo "exceptioin" fixed in the message) */
	sdk_err(dev->dev_hdl, "Dump stack when exception occurs, 16Bytes per line.\n");

	/* NOTE(review): stack_actlen is used unclamped here, unlike the
	 * watchdog path which limits it to SSS_STACK_DATA_LEN — confirm the
	 * firmware cannot report a length larger than stack_data[].
	 */
	cnt = stack_len / SSS_FOUR_REG_LEN;
	for (reg_i = 0; reg_i < cnt; reg_i++) {
		curr_reg = (u32 *)(lastword_info->stack_data +
				   ((u64)(u32)(reg_i * SSS_FOUR_REG_LEN)));
		sdk_err(dev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			*curr_reg, *(curr_reg + 0x1), *(curr_reg + 0x2), *(curr_reg + 0x3));
	}

	sdk_err(dev->dev_hdl, "==================== Exception Info End ====================\n");
}
/* Dispatch table mapping management event types to their handlers
 * (consumed by sss_pf_handle_mgmt_event()).
 */
const struct sss_mgmt_event g_mgmt_event_handler[] = {
{
.event_type = SSS_COMM_MGMT_CMD_FAULT_REPORT,
.handler = sss_fault_event_handler,
},
{
.event_type = SSS_COMM_MGMT_CMD_WATCHDOG_INFO,
.handler = sss_watchdog_timeout_event_handler,
},
{
.event_type = SSS_COMM_MGMT_CMD_LASTWORD_GET,
.handler = sss_lastword_report_event_handler,
},
};
/* Print a chip fault: extra detail for serious-FLR level, then the
 * node/type/CSR summary which always prints.
 */
static void sss_print_chip_fault(struct sss_hwdev *hwdev,
struct sss_fault_event *fault_event)
{
u8 err_level;
char *level_str = NULL;
char *fault_level[SSS_FAULT_LEVEL_MAX] = {
SSS_FAULT_LEVEL_STR_FATAL, SSS_FAULT_LEVEL_STR_RESET,
SSS_FAULT_LEVEL_STR_HOST, SSS_FAULT_LEVEL_STR_FLR,
SSS_FAULT_LEVEL_STR_GENERAL, SSS_FAULT_LEVEL_STR_SUGGESTION
};
err_level = fault_event->info.chip.err_level;
if (err_level < SSS_FAULT_LEVEL_MAX)
level_str = fault_level[err_level];
else
level_str = SSS_FAULT_LEVEL_STR_UNKNOWN;
/* only the func-id line is conditional; the summary below always prints */
if (err_level == SSS_FAULT_LEVEL_SERIOUS_FLR)
sdk_err(hwdev->dev_hdl, "Err_level: %u [%s], func_id: %u\n",
err_level, level_str, fault_event->info.chip.func_id);
sdk_err(hwdev->dev_hdl, "Node_id: 0x%x, err_type: 0x%x, err_level: %u[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n",
fault_event->info.chip.node_id, fault_event->info.chip.err_type,
err_level, level_str,
fault_event->info.chip.err_csr_addr, fault_event->info.chip.err_csr_value);
}
/* Print an NPU (ucode) fault: cause/core/context id and epc. */
static void sss_print_ucode_err(struct sss_hwdev *hwdev,
struct sss_fault_event *fault_event)
{
sdk_err(hwdev->dev_hdl, "Cause_id: %u, core_id: %u, c_id: %u, epc: 0x%08x\n",
fault_event->info.ucode.cause_id, fault_event->info.ucode.core_id,
fault_event->info.ucode.c_id, fault_event->info.ucode.epc);
}
/* Print a memory read/write timeout fault (shared by both directions). */
static void sss_print_mem_rw_err(struct sss_hwdev *hwdev,
struct sss_fault_event *fault_event)
{
sdk_err(hwdev->dev_hdl, "Err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_id: 0x%08x\n",
fault_event->info.mem_timeout.err_csr_ctrl,
fault_event->info.mem_timeout.err_csr_data,
fault_event->info.mem_timeout.ctrl_tab, fault_event->info.mem_timeout.mem_id);
}
/* Print a register read/write timeout fault (shared by both directions). */
static void sss_print_reg_rw_err(struct sss_hwdev *hwdev,
struct sss_fault_event *fault_event)
{
sdk_err(hwdev->dev_hdl, "Err_csr: 0x%08x\n", fault_event->info.reg_timeout.err_csr);
}
/* Print a PHY fault: operation, port and the CSR access involved. */
static void sss_print_phy_err(struct sss_hwdev *hwdev,
struct sss_fault_event *fault_event)
{
sdk_err(hwdev->dev_hdl, "Op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n",
fault_event->info.phy_fault.op_type, fault_event->info.phy_fault.port_id,
fault_event->info.phy_fault.dev_ad, fault_event->info.phy_fault.csr_addr,
fault_event->info.phy_fault.op_data);
}
/*
 * Print a fault event: bump the per-type statistics, show the raw values,
 * dump the chip error registers and finally delegate to the type-specific
 * printer (types beyond the handler table, e.g. tsensor, get no extra dump).
 */
static void sss_print_fault_info(struct sss_hwdev *hwdev,
struct sss_fault_event *fault_event)
{
struct sss_fault_event_stats *event_stats = &hwdev->hw_stats.fault_event_stats;
char *type = NULL;
char *fault_type[SSS_FAULT_TYPE_MAX] = {
SSS_FAULT_TYPE_STR_CHIP, SSS_FAULT_TYPE_STR_NPU,
SSS_FAULT_TYPE_STR_MEM_RD, SSS_FAULT_TYPE_STR_MEM_WR,
SSS_FAULT_TYPE_STR_REG_RD, SSS_FAULT_TYPE_STR_REG_WR,
SSS_FAULT_TYPE_STR_PHY, SSS_FAULT_TYPE_STR_TSENSOR
};
sss_print_err_handler_t print_handler[] = {
sss_print_chip_fault, sss_print_ucode_err,
sss_print_mem_rw_err, sss_print_mem_rw_err,
sss_print_reg_rw_err, sss_print_reg_rw_err,
sss_print_phy_err
};
if (fault_event->type < SSS_FAULT_TYPE_MAX) {
type = fault_type[fault_event->type];
atomic_inc(&event_stats->fault_type_stat[fault_event->type]);
} else {
type = SSS_FAULT_TYPE_STR_UNKNOWN;
}
sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %u\n",
sss_get_global_func_id(hwdev));
sdk_err(hwdev->dev_hdl, "Fault type: %u [%s]\n", fault_event->type, type);
sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n",
fault_event->info.val[0x0], fault_event->info.val[0x1],
fault_event->info.val[0x2], fault_event->info.val[0x3]);
sss_dump_chip_err_info(hwdev);
/* handler table is shorter than SSS_FAULT_TYPE_MAX; bound-check first */
if (fault_event->type >= ARRAY_LEN(print_handler))
return;
print_handler[fault_event->type](hwdev, fault_event);
}
/*
 * Management event: fault report.  Print the fault and forward it as an
 * SSS_EVENT_FAULT to the registered handler; the fault level is taken from
 * the chip info for chip faults and forced to FATAL otherwise.
 */
static void sss_fault_event_handler(void *data, void *in_buf, u16 in_size,
void *out_buf, u16 *out_size)
{
struct sss_hwdev *hwdev = data;
struct sss_cmd_fault_event *cmd_event = in_buf;
struct sss_event_info info;
/* fault_event aliases the event_data payload inside info */
struct sss_fault_event *fault_event = (void *)info.event_data;
if (in_size != sizeof(*cmd_event)) {
sdk_err(hwdev->dev_hdl, "Invalid size: %u.\n", in_size);
return;
}
sss_print_fault_info(hwdev, &cmd_event->fault_event);
if (hwdev->event_handler) {
info.type = SSS_EVENT_FAULT;
info.service = SSS_EVENT_SRV_COMM;
memcpy(info.event_data, &cmd_event->fault_event, sizeof(cmd_event->fault_event));
fault_event->fault_level = (cmd_event->fault_event.type == SSS_FAULT_TYPE_CHIP) ?
cmd_event->fault_event.info.chip.err_level :
SSS_FAULT_LEVEL_FATAL;
hwdev->event_handler(hwdev->event_handler_data, &info);
}
}
/*
 * PF management-event dispatcher: look up event_type in g_mgmt_event_handler
 * and invoke the handler; unknown events are answered with
 * SSS_MGMT_CMD_UNSUPPORTED.
 */
static void sss_pf_handle_mgmt_event(void *data, u16 event_type,
void *in_buf, u16 in_size, void *out_buf, u16 *out_size)
{
u32 i;
u32 num = ARRAY_LEN(g_mgmt_event_handler);
for (i = 0; i < num; i++) {
if (event_type == g_mgmt_event_handler[i].event_type &&
g_mgmt_event_handler[i].handler) {
g_mgmt_event_handler[i].handler(data, in_buf, in_size,
out_buf, out_size);
return;
}
}
/* no handler found: reply "unsupported" */
*out_size = sizeof(struct sss_mgmt_msg_head);
((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED;
sdk_warn(SSS_TO_DEV(data), "Unsupported mgmt event %u.\n", event_type);
}
/* Bring up the mailbox channel and hook its AEQ callbacks. */
static int sss_hwdev_init_mbx(struct sss_hwdev *hwdev)
{
int ret;
ret = sss_hwif_init_mbx(hwdev);
if (ret != 0)
return ret;
sss_aeq_register_hw_cb(hwdev, hwdev, SSS_MBX_FROM_FUNC, sss_recv_mbx_aeq_handler);
sss_aeq_register_hw_cb(hwdev, hwdev, SSS_MSG_FROM_MGMT, sss_mgmt_msg_aeqe_handler);
set_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state);
return 0;
}
/*
 * Tear down the mailbox channel: mark it down under the channel lock,
 * unhook the AEQ/mbx callbacks (VFs also drop the mgmt AEQ callback here),
 * then free the mailbox resources.
 */
static void sss_hwdev_deinit_mbx(struct sss_hwdev *hwdev)
{
spin_lock_bh(&hwdev->channel_lock);
clear_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state);
spin_unlock_bh(&hwdev->channel_lock);
sss_aeq_unregister_hw_cb(hwdev, SSS_MBX_FROM_FUNC);
if (!SSS_IS_VF(hwdev)) {
sss_unregister_pf_mbx_handler(hwdev, SSS_MOD_TYPE_COMM);
} else {
sss_unregister_vf_mbx_handler(hwdev, SSS_MOD_TYPE_COMM);
sss_aeq_unregister_hw_cb(hwdev, SSS_MSG_FROM_MGMT);
}
sss_hwif_deinit_mbx(hwdev);
}
/*
 * Read the global attributes from the management CPU and cache them in
 * hwdev->glb_attr.
 *
 * Return: 0 on success, -EIO when the message fails or the firmware
 * reports a non-zero status.
 */
static int sss_chip_get_global_attr(struct sss_hwdev *hwdev)
{
	int ret = 0;
	struct sss_cmd_get_glb_attr attr_cmd = {0};
	u16 out_len = sizeof(attr_cmd);

	ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_GET_GLOBAL_ATTR,
				&attr_cmd, sizeof(attr_cmd), &attr_cmd, &out_len);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &attr_cmd)) {
		/* hwdev is already a struct sss_hwdev *; redundant cast removed */
		sdk_err(hwdev->dev_hdl,
			"Fail to get global attr, ret: %d, status: 0x%x, out_len: 0x%x\n",
			ret, attr_cmd.head.state, out_len);
		return -EIO;
	}

	memcpy(&hwdev->glb_attr, &attr_cmd.attr, sizeof(hwdev->glb_attr));

	return 0;
}
/*
 * Negotiate comm features: read the firmware-supported set, then mask it
 * with the driver's default set (a PPF additionally enables channel detect).
 */
static int sss_chip_get_feature(struct sss_hwdev *hwdev)
{
int i;
int ret;
u64 feature[SSS_MAX_FEATURE_QWORD] = {SSS_DRV_FEATURE_DEF, 0, 0, 0};
ret = sss_chip_do_nego_feature(hwdev, SSS_MGMT_MSG_GET_CMD,
hwdev->features, SSS_MAX_FEATURE_QWORD);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to get comm feature\n");
return ret;
}
if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_PPF)
feature[0] |= SSS_COMM_F_CHANNEL_DETECT;
/* keep only features both sides support */
for (i = 0; i < SSS_MAX_FEATURE_QWORD; i++)
hwdev->features[i] &= feature[i];
return 0;
}
/*
 * Fetch board info, negotiated features and global attributes in order,
 * stopping at the first failure.
 */
static int sss_get_global_info(struct sss_hwdev *hwdev)
{
	int ret;

	ret = sss_chip_get_board_info(hwdev, &hwdev->board_info);
	if (ret != 0)
		return ret;

	ret = sss_chip_get_feature(hwdev);
	if (ret != 0)
		return ret;

	return sss_chip_get_global_attr(hwdev);
}
/* Tear down the ADM channel (PF/PPF only; VFs never initialized it). */
static void sss_hwdev_deinit_adm(struct sss_hwdev *hwdev)
{
if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF)
return;
spin_lock_bh(&hwdev->channel_lock);
clear_bit(SSS_HW_ADM_INIT_OK, &hwdev->func_state);
spin_unlock_bh(&hwdev->channel_lock);
sss_unregister_mgmt_msg_handler(hwdev, SSS_MOD_TYPE_COMM);
sss_aeq_unregister_hw_cb(hwdev, SSS_MSG_FROM_MGMT);
sss_hwif_deinit_adm(hwdev);
}
/* Bring up the ADM channel and register the PF mgmt-event dispatcher
 * (no-op for VFs).
 */
static int sss_hwdev_init_adm(struct sss_hwdev *hwdev)
{
int ret;
if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF)
return 0;
ret = sss_hwif_init_adm(hwdev);
if (ret != 0)
return ret;
sss_register_mgmt_msg_handler(hwdev, SSS_MOD_TYPE_COMM, hwdev,
sss_pf_handle_mgmt_event);
set_bit(SSS_HW_ADM_INIT_OK, &hwdev->func_state);
return 0;
}
/*
 * Program the default DMA attribute table entry for this function via a
 * management message (everything disabled, snooping on).
 */
static int sss_chip_set_dma_attr_table(struct sss_hwdev *hwdev)
{
int ret;
struct sss_cmd_dma_attr_config attr = {0};
u16 out_len = sizeof(attr);
attr.ph = SSS_PCIE_PH_DISABLE;
attr.at = SSS_PCIE_AT_DISABLE;
attr.st = SSS_PCIE_ST_DISABLE;
attr.no_snooping = SSS_PCIE_SNOOP;
attr.tph_en = SSS_PCIE_TPH_DISABLE;
attr.func_id = sss_get_global_func_id(hwdev);
attr.entry_id = SSS_PCIE_MSIX_ATTR_ENTRY;
ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_DMA_ATTR, &attr, sizeof(attr),
&attr, &out_len);
if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &attr)) {
sdk_err(hwdev->dev_hdl,
"Fail to set dma attr, ret: %d, status: 0x%x, out_len: 0x%x\n",
ret, attr.head.state, out_len);
return -EIO;
}
return 0;
}
/* Make sure the DMA attribute table entry used for MSI-X matches the
 * driver's expected value, programming it through the management CPU
 * only when the current hardware value differs.
 */
static int sss_chip_init_dma_attr(struct sss_hwdev *hwdev)
{
	u32 set;
	u32 get;
	u32 dst;

	/* Select the MSI-X attribute entry via the indirect index register. */
	set = sss_chip_read_reg(hwdev->hwif, SSS_CSR_DMA_ATTR_INDIR_ID_ADDR);
	set = SSS_CLEAR_DMA_ATTR_INDIR_ID(set, ID);
	set |= SSS_SET_DMA_ATTR_INDIR_ID(SSS_PCIE_MSIX_ATTR_ENTRY, ID);
	sss_chip_write_reg(hwdev->hwif, SSS_CSR_DMA_ATTR_INDIR_ID_ADDR, set);

	/* make sure reset dma attr */
	wmb();

	/* The value the driver expects to find in the attribute table. */
	dst = SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_TPH_DISABLE, TPH_EN) |
	      SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_SNOOP, NO_SNOOPING) |
	      SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_ST_DISABLE, ST) |
	      SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_AT_DISABLE, AT) |
	      SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_PH_DISABLE, PH);
	get = sss_chip_read_reg(hwdev->hwif, SSS_CSR_DMA_ATTR_TBL_ADDR);
	if (get == dst)
		return 0;

	return sss_chip_set_dma_attr_table(hwdev);
}

/* Mark the PF status register as active. */
static void sss_chip_set_pf_state(struct sss_hwdev *hwdev)
{
	sss_chip_set_pf_status(hwdev->hwif, SSS_PF_STATUS_ACTIVE_FLAG);
}

/* Put the PF status register back to the init state. */
static void sss_chip_reset_pf_state(struct sss_hwdev *hwdev)
{
	sss_chip_set_pf_status(hwdev->hwif, SSS_PF_STATUS_INIT);
}
/* Bring up the minimum transport needed to talk to firmware: the AEQs
 * for event delivery, the mailbox, and the AEQ MSI-X attributes.
 * Completed stages are undone in reverse order on failure.
 */
static int sss_init_basic_mgmt_channel(struct sss_hwdev *hwdev)
{
	int ret;

	ret = sss_hwif_init_aeq(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init comm aeqs\n");
		return ret;
	}

	ret = sss_hwdev_init_mbx(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init mbx\n");
		goto init_mbx_err;
	}

	ret = sss_init_aeq_msix_attr(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init aeqs msix attr\n");
		goto init_aeq_msix_attr_err;
	}

	return 0;

init_aeq_msix_attr_err:
	sss_hwdev_deinit_mbx(hwdev);
init_mbx_err:
	sss_hwif_deinit_aeq(hwdev);

	return ret;
}

/* Reverse of sss_init_basic_mgmt_channel(). */
static void sss_free_base_mgmt_channel(struct sss_hwdev *hwdev)
{
	sss_hwdev_deinit_mbx(hwdev);
	sss_hwif_deinit_aeq(hwdev);
}
/* Full management-channel bring-up.  Stages, in order: basic channel
 * (AEQ + mailbox) -> function-level reset -> global device info -> ADM
 * channel -> mark the COMM service in use -> DMA attribute table ->
 * control-queue channel -> PF active + STL software-event callback.
 * On failure, every completed stage is unwound in reverse via the goto
 * ladder below.  Returns 0 on success or a negative errno.
 */
int sss_init_mgmt_channel(struct sss_hwdev *hwdev)
{
	int ret;

	/* init aeq, mbx */
	ret = sss_init_basic_mgmt_channel(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init basic mgmt channel\n");
		return ret;
	}

	ret = sss_chip_reset_function(hwdev, sss_get_global_func_id(hwdev),
				      SSS_COMM_RESET_TYPE, SSS_CHANNEL_COMM);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to reset func\n");
		goto out;
	}

	ret = sss_get_global_info(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init hwdev attr\n");
		goto out;
	}

	ret = sss_hwdev_init_adm(hwdev);
	if (ret != 0)
		goto out;

	ret = sss_chip_set_func_used_state(hwdev, SSS_SVC_TYPE_COM,
					   true, SSS_CHANNEL_COMM);
	if (ret != 0)
		goto set_use_state_err;

	ret = sss_chip_init_dma_attr(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init dma attr table\n");
		goto init_dma_attr_err;
	}

	ret = sss_init_ctrlq_channel(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init ctrlq channel\n");
		goto init_ctrlq_channel_err;
	}

	sss_chip_set_pf_state(hwdev);

	ret = sss_aeq_register_swe_cb(hwdev, hwdev, SSS_STL_EVENT, sss_sw_aeqe_handler);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl,
			"Fail to register sw aeqe handler\n");
		goto register_ucode_aeqe_err;
	}

	return 0;

register_ucode_aeqe_err:
	sss_chip_reset_pf_state(hwdev);
	sss_deinit_ctrlq_channel(hwdev);
init_ctrlq_channel_err:
init_dma_attr_err:
	sss_chip_set_func_used_state(hwdev, SSS_SVC_TYPE_COM,
				     false, SSS_CHANNEL_COMM);
set_use_state_err:
	sss_hwdev_deinit_adm(hwdev);
out:
	sss_free_base_mgmt_channel(hwdev);

	return ret;
}
/* Tear down the management channel in the reverse order of the stages
 * set up by sss_init_mgmt_channel().
 */
void sss_deinit_mgmt_channel(struct sss_hwdev *hwdev)
{
	sss_aeq_unregister_swe_cb(hwdev, SSS_STL_EVENT);
	sss_chip_reset_pf_state(hwdev);
	sss_deinit_ctrlq_channel(hwdev);
	sss_chip_set_func_used_state(hwdev, SSS_SVC_TYPE_COM,
				     false, SSS_CHANNEL_COMM);
	sss_hwdev_deinit_adm(hwdev);
	sss_free_base_mgmt_channel(hwdev);
}

View File

@ -0,0 +1,127 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWDEV_MGMT_CHANNEL_H
#define SSS_HWDEV_MGMT_CHANNEL_H
#include "sss_hwdev.h"
#define SSS_STACK_DATA_LEN 1024		/* max stack bytes carried in a watchdog dump */
#define SSS_XREGS_NUM 31		/* xregs[] entries, x0..x30 (see sss_ax_exc_reg_info) */
#define SSS_MPU_LASTWORD_SIZE 1024	/* max stack bytes in a last-word dump */

/* Watchdog timeout report sent by the management CPU: a timestamp, the
 * id of the task that was running, a full register snapshot and a copy
 * of that task's stack (stack_actlen valid bytes of stack_data, capped
 * at SSS_STACK_DATA_LEN).
 */
struct sss_watchdog_info {
	struct sss_mgmt_msg_head head;

	u32 cur_time_h;
	u32 cur_time_l;
	u32 task_id;
	u32 rsvd;

	/* Exception/link registers followed by x30 down to x00. */
	u64 pc;
	u64 elr;
	u64 spsr;
	u64 far;
	u64 esr;
	u64 xzr;
	u64 x30;
	u64 x29;
	u64 x28;
	u64 x27;
	u64 x26;
	u64 x25;
	u64 x24;
	u64 x23;
	u64 x22;
	u64 x21;
	u64 x20;
	u64 x19;
	u64 x18;
	u64 x17;
	u64 x16;
	u64 x15;
	u64 x14;
	u64 x13;
	u64 x12;
	u64 x11;
	u64 x10;
	u64 x09;
	u64 x08;
	u64 x07;
	u64 x06;
	u64 x05;
	u64 x04;
	u64 x03;
	u64 x02;
	u64 x01;
	u64 x00;

	/* Stack geometry and usage at the time of the timeout. */
	u64 stack_top;
	u64 stack_bottom;
	u64 sp;
	u32 cur_used;
	u32 peak_used;
	u32 is_overflow;

	u32 stack_actlen;
	u8 stack_data[SSS_STACK_DATA_LEN];
};
/* 64-bit CPU cycle counter split into two 32-bit halves. */
struct sss_cpu_tick {
	u32 tick_cnt_h; /* The cycle count higher 32 bits */
	u32 tick_cnt_l; /* The cycle count lower 32 bits */
};

/* Exception register context reported by the management CPU; register
 * names follow the AArch64 convention.
 */
struct sss_ax_exc_reg_info {
	u64 ttbr0;
	u64 ttbr1;
	u64 tcr;
	u64 mair;
	u64 sctlr;
	u64 vbar;
	u64 current_el;
	u64 sp;
	u64 elr;
	u64 spsr;
	u64 far_r;
	u64 esr;
	u64 xzr;
	u64 xregs[SSS_XREGS_NUM]; /* 0~30: x30~x0 */
};
/* Snapshot of the management CPU's state at the moment of a fatal
 * exception, as carried inside a last-word report.
 */
struct sss_exc_info {
	char os_ver[48];	/* OS version */
	char app_ver[64];	/* Product version */
	u32 exc_cause;		/* Cause of exception */
	u32 thread_type;	/* The thread type before the exception */
	u32 thread_id;		/* Thread PID before exception */
	u16 byte_order;		/* Byte order */
	u16 cpu_type;		/* CPU type */
	u32 cpu_id;		/* CPU ID */
	struct sss_cpu_tick cpu_tick;	/* CPU Tick */
	u32 nest_cnt;		/* The exception nested count */
	u32 fatal_errno;	/* Fatal error code */
	u64 uw_sp;		/* The stack pointer before the exception */
	u64 stack_bottom;	/* Bottom of the stack before the exception */
	/* The in-core register context information,*/
	/* 82\57 must be at 152 bytes; if it has changed, */
	/* the OS_EXC_REGINFO_OFFSET macro in sre_platform.eh must be updated */
	struct sss_ax_exc_reg_info reg_info;
};

/* Last-word ("dying gasp") report from the management CPU: exception
 * context plus a truncated copy of the faulting stack.
 */
struct sss_lastword_info {
	struct sss_mgmt_msg_head head;
	struct sss_exc_info stack_info;

	/* Stack details, Actual stack size(<=1024) */
	u32 stack_actlen;

	/* More than 1024, it will be truncated */
	u8 stack_data[SSS_MPU_LASTWORD_SIZE];
};

int sss_init_mgmt_channel(struct sss_hwdev *hwdev);
void sss_deinit_mgmt_channel(struct sss_hwdev *hwdev);
#endif

View File

@ -0,0 +1,97 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include "sss_kernel.h"
#include "sss_hwdev.h"
#include "sss_hw_svc_cap.h"
#include "sss_hwif_irq.h"
/* Build the managed CEQ bookkeeping table from the number of CEQs the
 * hardware interface reports.  Every entry starts out free and bound to
 * no service.  Returns 0 on success, -EFAULT when the chip reports no
 * CEQs, or -ENOMEM.
 */
static int sss_init_ceq_info(struct sss_hwdev *hwdev)
{
	struct sss_eq_info *ceq_info = &hwdev->mgmt_info->eq_info;
	struct sss_eq_cfg *cfg = NULL;
	u8 idx;

	ceq_info->ceq_num = SSS_GET_HWIF_CEQ_NUM(hwdev->hwif);
	ceq_info->remain_ceq_num = ceq_info->ceq_num;
	mutex_init(&ceq_info->eq_mutex);

	sdk_info(hwdev->dev_hdl, "Mgmt ceq info: ceq_num = 0x%x, remain_ceq_num = 0x%x\n",
		 ceq_info->ceq_num, ceq_info->remain_ceq_num);

	if (ceq_info->ceq_num == 0) {
		sdk_err(hwdev->dev_hdl, "Mgmt ceq info: ceq_num = 0\n");
		return -EFAULT;
	}

	cfg = kcalloc(ceq_info->ceq_num, sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	for (idx = 0; idx < ceq_info->ceq_num; idx++) {
		cfg[idx].id = idx + 1;	/* managed CEQ ids start at 1 */
		cfg[idx].free = SSS_CFG_FREE;
		cfg[idx].type = SSS_SERVICE_TYPE_MAX;
	}
	ceq_info->eq = cfg;

	return 0;
}
static void sss_deinit_ceq_info(struct sss_hwdev *hwdev)
{
struct sss_eq_info *ceq_info = &hwdev->mgmt_info->eq_info;
kfree(ceq_info->eq);
}
/* Allocate hwdev->mgmt_info and populate its CEQ and IRQ bookkeeping.
 * On any failure everything allocated so far is released and
 * hwdev->mgmt_info is reset to NULL.  Returns 0 or a negative errno.
 */
int sss_init_mgmt_info(struct sss_hwdev *hwdev)
{
	int ret;
	struct sss_mgmt_info *mgmt_info;

	mgmt_info = kzalloc(sizeof(*mgmt_info), GFP_KERNEL);
	if (!mgmt_info)
		return -ENOMEM;

	mgmt_info->hwdev = hwdev;
	hwdev->mgmt_info = mgmt_info;

	ret = sss_init_ceq_info(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init ceq info, ret: %d\n", ret);
		goto init_ceq_info_err;
	}

	ret = sss_init_irq_info(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init irq info, ret: %d\n", ret);
		goto init_irq_info_err;
	}

	return 0;

init_irq_info_err:
	sss_deinit_ceq_info(hwdev);
init_ceq_info_err:
	kfree(mgmt_info);
	hwdev->mgmt_info = NULL;

	return ret;
}

/* Release everything set up by sss_init_mgmt_info(), in reverse order. */
void sss_deinit_mgmt_info(struct sss_hwdev *hwdev)
{
	sss_deinit_irq_info(hwdev);
	sss_deinit_ceq_info(hwdev);

	kfree(hwdev->mgmt_info);
	hwdev->mgmt_info = NULL;
}

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWDEV_MGMT_INFO_H
#define SSS_HWDEV_MGMT_INFO_H
#include "sss_hwdev.h"
int sss_init_mgmt_info(struct sss_hwdev *dev);
void sss_deinit_mgmt_info(struct sss_hwdev *dev);
#endif

View File

@ -0,0 +1,805 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/semaphore.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_common.h"
#include "sss_hwdev.h"
#include "sss_csr.h"
#include "sss_hwif_api.h"
#include "sss_hwif_adm_common.h"
#include "sss_hwif_aeq.h"
/* Sizes (bytes) of the pieces that make up one ADM ring element. */
#define SSS_ADM_MSG_ELEM_DESC_SIZE 8
#define SSS_ADM_MSG_ELEM_DATA_ADDR_SIZE 8
#define SSS_ADM_MSG_ELEM_WB_ADDR_SIZE 8
#define SSS_ADM_MSG_ELEM_ALIGNMENT 8

#define SSS_ADM_MSG_STATE_TIMEOUT 10000

/* adm_msg_state header */
#define SSS_ADM_MSG_STATE_HEAD_VALID_SHIFT 0
#define SSS_ADM_MSG_STATE_HEAD_MSG_ID_SHIFT 16

#define SSS_ADM_MSG_STATE_HEAD_VALID_MASK 0xFFU
#define SSS_ADM_MSG_STATE_HEAD_MSG_ID_MASK 0xFFU

#define COMPLETION_TIMEOUT_DEFAULT 1000UL
#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U

#define SSS_ADM_MSG_STATE_HEAD_GET(val, member) \
	(((val) >> SSS_ADM_MSG_STATE_HEAD_##member##_SHIFT) & \
	SSS_ADM_MSG_STATE_HEAD_##member##_MASK)

/* Field values used when building the element descriptor (see
 * sss_prepare_elem_desc()).
 */
enum sss_adm_msg_data_format {
	SSS_SGL_TYPE = 1,
};

enum sss_adm_msg_opt {
	SSS_ADM_MSG_WRITE = 0,
	SSS_ADM_MSG_READ = 1,
};

enum sss_adm_msg_bypass {
	SSS_NO_BYPASS = 0,
	SSS_BYPASS = 1,
};

enum sss_adm_msg_reply_aeq {
	SSS_NO_TRIGGER = 0,
	SSS_TRIGGER = 1,
};

enum sss_adm_msg_chn_code {
	SSS_ADM_MSG_CHANNEL_0 = 0,
};

enum sss_adm_msg_chn_rsvd {
	SSS_VALID_MSG_CHANNEL = 0,
	SSS_INVALID_MSG_CHANNEL = 1,
};

/* Number of bytes folded into the XOR checksum (see sss_xor_chksum_set()). */
#define SSS_ADM_MSG_DESC_LEN 7

/* Common header at the start of management message bodies; inspected by
 * sss_check_msg_body().
 */
struct sss_msg_head {
	u8 state;
	u8 version;
	u8 reply_aeq_num;
	u8 rsvd0[5];
};
/* On-wire message sizing: a device-reserved prefix plus the 64-bit
 * header plus the body, padded in SIZE_STEP increments with a SIZE_MIN
 * floor (see sss_align_adm_msg_len()).
 */
#define SSS_MGMT_MSG_SIZE_MIN 20
#define SSS_MGMT_MSG_SIZE_STEP 16
#define SSS_MGMT_MSG_RSVD_FOR_DEV 8

#define SSS_MSG_TO_MGMT_LEN_MAX 2016

/* Sync message ids cycle through 0..7. */
#define SSS_SYNC_MSG_ID_MASK 0x7
#define SSS_SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)
#define SSS_INCREASE_SYNC_MSG_ID(pf_to_mgmt) \
	((pf_to_mgmt)->sync_msg_id = \
	((pf_to_mgmt)->sync_msg_id + 1) & SSS_SYNC_MSG_ID_MASK)

#define SSS_MGMT_MSG_TIMEOUT 20000 /* millisecond */

/* Sleep range while waiting for a running callback to drain. */
#define SSS_MSG_CB_USLEEP_MIN 900
#define SSS_MSG_CB_USLEEP_MAX 1000

/* Assemble the 64-bit ADM message header from its fields. */
#define SSS_ENCAPSULATE_ADM_MSG_HEAD(func_id, msg_len, mod, cmd, msg_id) \
	(SSS_SET_MSG_HEADER(msg_len, MSG_LEN) | \
	SSS_SET_MSG_HEADER(mod, MODULE) | \
	SSS_SET_MSG_HEADER(msg_len, SEG_LEN) | \
	SSS_SET_MSG_HEADER(SSS_MSG_ACK, NO_ACK) | \
	SSS_SET_MSG_HEADER(SSS_INLINE_DATA, DATA_TYPE) | \
	SSS_SET_MSG_HEADER(0, SEQID) | \
	SSS_SET_MSG_HEADER(SSS_ADM_MSG_AEQ_ID, AEQ_ID) | \
	SSS_SET_MSG_HEADER(SSS_LAST_SEG, LAST) | \
	SSS_SET_MSG_HEADER(SSS_DIRECT_SEND_MSG, DIRECTION) | \
	SSS_SET_MSG_HEADER(cmd, CMD) | \
	SSS_SET_MSG_HEADER(SSS_MSG_SRC_MGMT, SOURCE) | \
	SSS_SET_MSG_HEADER(func_id, SRC_GLB_FUNC_ID) | \
	SSS_SET_MSG_HEADER(msg_id, MSG_ID))

/* Layout of the 64-bit response header written back by the chip. */
#define SSSNIC_API_CMD_RESP_HEAD_VALID_SHIFT 0
#define SSSNIC_API_CMD_RESP_HEAD_STATUS_SHIFT 8
#define SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16
#define SSSNIC_API_CMD_RESP_HEAD_RESP_LEN_SHIFT 24
#define SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40

#define SSSNIC_API_CMD_RESP_HEAD_VALID_MASK 0xFF
#define SSSNIC_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU
#define SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFFU
#define SSSNIC_API_CMD_RESP_HEAD_RESP_LEN_MASK 0x1FFU
#define SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU

#define SSSNIC_API_CMD_RESP_HEAD_VALID_CODE 0xFF

#define SSSNIC_API_CMD_RESP_HEADER_VALID(val) \
	(((val) & SSSNIC_API_CMD_RESP_HEAD_VALID_MASK) == \
	SSSNIC_API_CMD_RESP_HEAD_VALID_CODE)

#define SSSNIC_API_CMD_RESP_HEAD_GET(val, member) \
	(((val) >> SSSNIC_API_CMD_RESP_HEAD_##member##_SHIFT) & \
	SSSNIC_API_CMD_RESP_HEAD_##member##_MASK)

#define SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID(val) \
	(((val) >> SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \
	SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK)

#define SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \
	((u16)(((val) >> SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \
	SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK))
/* XOR-fold the first SSS_ADM_MSG_DESC_LEN bytes of @data into a single
 * checksum byte, as required by the descriptor/control-word format.
 */
static u8 sss_xor_chksum_set(void *data)
{
	const u8 *byte = data;
	u8 sum = 0;
	int i;

	for (i = 0; i < SSS_ADM_MSG_DESC_LEN; i++)
		sum ^= byte[i];

	return sum;
}
/* Publish the software producer index to the chain's hardware PI CSR. */
static void sss_chip_set_pi(struct sss_adm_msg *adm_msg)
{
	enum sss_adm_msg_type msg_type = adm_msg->msg_type;
	struct sss_hwif *hwif = SSS_TO_HWDEV(adm_msg)->hwif;
	u32 hw_pi_addr = SSS_CSR_ADM_MSG_PI_ADDR(msg_type);

	sss_chip_write_reg(hwif, hw_pi_addr, adm_msg->pi);
}

/* Read the hardware consumer index from the chain's state CSR. */
static u32 sss_chip_get_ci(struct sss_adm_msg *adm_msg)
{
	u32 addr;
	u32 val;

	addr = SSS_CSR_ADM_MSG_STATE_0_ADDR(adm_msg->msg_type);
	val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr);

	return SSS_GET_ADM_MSG_STATE(val, CI);
}

/* Dump the chain's state and PI CSRs plus the PCI command register;
 * used for diagnostics when a request cannot be posted or times out.
 */
static void sss_dump_adm_msg_reg(struct sss_adm_msg *adm_msg)
{
	void *dev = SSS_TO_HWDEV(adm_msg)->dev_hdl;
	u32 addr;
	u32 val;
	u16 pci_cmd = 0;

	addr = SSS_CSR_ADM_MSG_STATE_0_ADDR(adm_msg->msg_type);
	val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr);

	sdk_err(dev, "Msg type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n",
		adm_msg->msg_type, SSS_GET_ADM_MSG_STATE(val, CPLD_ERR),
		SSS_GET_ADM_MSG_STATE(val, CHKSUM_ERR),
		SSS_GET_ADM_MSG_STATE(val, FSM));

	sdk_err(dev, "Adm msg hw current ci: 0x%x\n",
		SSS_GET_ADM_MSG_STATE(val, CI));

	addr = SSS_CSR_ADM_MSG_PI_ADDR(adm_msg->msg_type);
	val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr);
	sdk_err(dev, "Adm msg hw current pi: 0x%x\n", val);

	pci_read_config_word(SSS_TO_HWDEV(adm_msg)->pcidev_hdl, PCI_COMMAND, &pci_cmd);
	sdk_err(dev, "PCI command reg: 0x%x\n", pci_cmd);
}
/* Check whether the chain can accept a new request at the current PI.
 * Read chains are busy while the target element context is in-flight
 * without a valid response header; write chains are busy when the
 * hardware consumer index shows the ring is full.
 * Returns 0 when free, -EBUSY when busy, -EINVAL on an unknown type.
 */
static int sss_adm_msg_busy(struct sss_adm_msg *adm_msg)
{
	void *dev = SSS_TO_HWDEV(adm_msg)->dev_hdl;
	struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[adm_msg->pi];
	u64 resp_header;

	switch (adm_msg->msg_type) {
	case SSS_ADM_MSG_MULTI_READ:
	case SSS_ADM_MSG_POLL_READ:
		resp_header = be64_to_cpu(ctx->reply_fmt->head);
		if (ctx->state && !SSSNIC_API_CMD_RESP_HEADER_VALID(resp_header)) {
			sdk_err(dev, "Context(0x%x) busy!, pi: %u, resp_header: 0x%08x%08x\n",
				ctx->state, adm_msg->pi,
				upper_32_bits(resp_header),
				lower_32_bits(resp_header));
			sss_dump_adm_msg_reg(adm_msg);
			return -EBUSY;
		}
		break;
	case SSS_ADM_MSG_POLL_WRITE:
	case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE:
	case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE:
		/* Ring is full when ci equals the slot after the next pi. */
		adm_msg->ci = sss_chip_get_ci(adm_msg);
		if (adm_msg->ci == SSS_MASK_ID(adm_msg, adm_msg->pi + 1)) {
			sdk_err(dev, "API CMD chain %d is busy, cons_idx = %u, prod_idx = %u\n",
				adm_msg->msg_type, adm_msg->ci,
				adm_msg->pi);
			sss_dump_adm_msg_reg(adm_msg);
			return -EBUSY;
		}
		break;
	default:
		sdk_err(dev, "Unknown Chain type %d\n", adm_msg->msg_type);
		return -EINVAL;
	}

	return 0;
}
/* Build the element control word for @msg_type: the element length
 * (which depends on whether a write-back address is present), the DMA
 * attribute offsets, and an XOR checksum, stored big-endian for the HW.
 */
static void sss_prepare_elem_ctrl(u64 *elem_ctrl, enum sss_adm_msg_type msg_type)
{
	u64 control;
	u8 chksum;
	u16 elem_len = 0;

	switch (msg_type) {
	case SSS_ADM_MSG_POLL_READ:
		/* Reads carry an extra write-back address for the reply. */
		elem_len = ALIGN(SSS_ADM_MSG_ELEM_DESC_SIZE + SSS_ADM_MSG_ELEM_WB_ADDR_SIZE +
				 SSS_ADM_MSG_ELEM_DATA_ADDR_SIZE, SSS_ADM_MSG_ELEM_ALIGNMENT);
		break;
	case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE:
	case SSS_ADM_MSG_POLL_WRITE:
	case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE:
		elem_len = ALIGN(SSS_ADM_MSG_ELEM_DESC_SIZE +
				 SSS_ADM_MSG_ELEM_DATA_ADDR_SIZE, SSS_ADM_MSG_ELEM_ALIGNMENT);
		break;
	default:
		break;
	}

	control = SSS_ADM_MSG_ELEM_CTRL_SET(SSS_SIZE_TO_8B(elem_len), ELEM_LEN) |
		  SSS_ADM_MSG_ELEM_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) |
		  SSS_ADM_MSG_ELEM_CTRL_SET(0ULL, WR_DMA_ATTR_OFF);

	/* Checksum is computed over the word, then merged into it. */
	chksum = sss_xor_chksum_set(&control);

	control |= SSS_ADM_MSG_ELEM_CTRL_SET(chksum, XOR_CHKSUM);

	/* The data in the HW should be in Big Endian Format */
	*elem_ctrl = cpu_to_be64(control);
}

/* Build the element descriptor for the current PI: operation kind,
 * bypass/AEQ flags and private data depend on the chain type; the
 * destination node, payload size, channel bits and XOR checksum are
 * common.  Stored big-endian for the HW.
 */
static void sss_prepare_elem_desc(struct sss_adm_msg *adm_msg,
				  u8 node_id, u16 cmd_size)
{
	u32 priv;
	struct sss_adm_msg_elem *elem = adm_msg->now_node;
	struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[adm_msg->pi];

	switch (adm_msg->msg_type) {
	case SSS_ADM_MSG_POLL_READ:
		/* Reads stash the slot index in the private data. */
		priv = SSS_READ_ADM_MSG_PRIV_DATA(adm_msg->msg_type, ctx->store_pi);
		elem->desc = SSS_ADM_MSG_DESC_SET(SSS_SGL_TYPE, SGL_TYPE) |
			     SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_READ, RD_WR) |
			     SSS_ADM_MSG_DESC_SET(SSS_BYPASS, MGMT_BYPASS) |
			     SSS_ADM_MSG_DESC_SET(SSS_NO_TRIGGER, REPLY_AEQE_EN) |
			     SSS_ADM_MSG_DESC_SET(priv, PRIV_DATA);
		break;
	case SSS_ADM_MSG_POLL_WRITE:
		priv = SSS_WRITE_ADM_MSG_PRIV_DATA(adm_msg->msg_type);
		elem->desc = SSS_ADM_MSG_DESC_SET(SSS_SGL_TYPE, SGL_TYPE) |
			     SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_WRITE, RD_WR) |
			     SSS_ADM_MSG_DESC_SET(SSS_BYPASS, MGMT_BYPASS) |
			     SSS_ADM_MSG_DESC_SET(SSS_NO_TRIGGER, REPLY_AEQE_EN) |
			     SSS_ADM_MSG_DESC_SET(priv, PRIV_DATA);
		break;
	case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE:
	case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE:
		/* Mgmt-module writes go through the mgmt CPU and raise
		 * a reply AEQ event.
		 */
		priv = SSS_WRITE_ADM_MSG_PRIV_DATA(adm_msg->msg_type);
		elem->desc = SSS_ADM_MSG_DESC_SET(SSS_SGL_TYPE, SGL_TYPE) |
			     SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_WRITE, RD_WR) |
			     SSS_ADM_MSG_DESC_SET(SSS_NO_BYPASS, MGMT_BYPASS) |
			     SSS_ADM_MSG_DESC_SET(SSS_TRIGGER, REPLY_AEQE_EN) |
			     SSS_ADM_MSG_DESC_SET(priv, PRIV_DATA);
		break;
	default:
		sdk_err(((struct sss_hwdev *)adm_msg->hwdev)->dev_hdl, "Unknown Chain type: %d\n",
			adm_msg->msg_type);
		return;
	}

	elem->desc |= SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_CHANNEL_0, MSG_CHANNEL) |
		      SSS_ADM_MSG_DESC_SET(SSS_VALID_MSG_CHANNEL, MSG_VALID);

	elem->desc |= SSS_ADM_MSG_DESC_SET(node_id, DEST) |
		      SSS_ADM_MSG_DESC_SET(SSS_SIZE_TO_4B(cmd_size), SIZE);

	elem->desc |= SSS_ADM_MSG_DESC_SET(sss_xor_chksum_set(&elem->desc), XOR_CHKSUM);

	/* The data in the HW should be in Big Endian Format */
	elem->desc = cpu_to_be64(elem->desc);
}
/* Copy the command payload into the current element's message buffer. */
static void sss_prepare_elem_ctx(struct sss_adm_msg *adm_msg,
				 const void *cmd, u16 cmd_size)
{
	struct sss_adm_msg_elem_ctx *elem_ctx = &adm_msg->elem_ctx[adm_msg->pi];

	memcpy(elem_ctx->adm_msg_vaddr, cmd, cmd_size);
}

/* Fill in the current ring element: control word, descriptor and data. */
static void sss_prepare_elem(struct sss_adm_msg *adm_msg, u8 node_id,
			     const void *cmd, u16 cmd_size)
{
	struct sss_adm_msg_elem *now_node = adm_msg->now_node;

	sss_prepare_elem_ctrl(&now_node->control, adm_msg->msg_type);
	sss_prepare_elem_desc(adm_msg, node_id, cmd_size);
	sss_prepare_elem_ctx(adm_msg, cmd, cmd_size);
}

/* Advance the software producer index, wrapping at the ring size. */
static inline void sss_adm_msg_increase_pi(struct sss_adm_msg *adm_msg)
{
	adm_msg->pi = SSS_MASK_ID(adm_msg, adm_msg->pi + 1);
}

/* Kick the hardware by publishing the new producer index. */
static void sss_issue_adm_msg(struct sss_adm_msg *adm_msg)
{
	sss_chip_set_pi(adm_msg);
}
/* Refresh adm_msg->ci from the DMA'd write-back state area.  Write-backs
 * with a checksum error, or whose MSG_ID field (which carries the chain
 * type) does not match this chain, are ignored.
 */
static void sss_update_adm_msg_state(struct sss_adm_msg *adm_msg)
{
	struct sss_adm_msg_state *wb_state;
	enum sss_adm_msg_type msg_type;
	u64 status_header;
	u32 desc_buf;

	wb_state = adm_msg->wb_state;

	desc_buf = be32_to_cpu(wb_state->desc_buf);
	if (SSS_GET_ADM_MSG_STATE(desc_buf, CHKSUM_ERR))
		return;

	status_header = be64_to_cpu(wb_state->head);
	msg_type = SSS_ADM_MSG_STATE_HEAD_GET(status_header, MSG_ID);
	if (msg_type >= SSS_ADM_MSG_MAX)
		return;

	if (msg_type != adm_msg->msg_type)
		return;

	adm_msg->ci = SSS_GET_ADM_MSG_STATE(desc_buf, CI);
}

/* Poll callback for sync write completion: done once the hardware has
 * consumed up to pi, error if the chip has gone away.
 */
static enum sss_process_ret sss_wait_for_state_poll_handler(void *priv_data)
{
	struct sss_adm_msg *adm_msg = priv_data;

	if (!SSS_TO_HWDEV(adm_msg)->chip_present_flag)
		return SSS_PROCESS_ERR;

	sss_update_adm_msg_state(adm_msg);

	/* SYNC ADM MSG cmd should start after prev cmd finished */
	if (adm_msg->ci == adm_msg->pi)
		return SSS_PROCESS_OK;

	return SSS_PROCESS_DOING;
}
/* Poll callback for read replies: succeed once the write-back header is
 * valid with a zero status; fail on chip removal or a non-zero status.
 */
static enum sss_process_ret check_cmd_resp_handler(void *priv_data)
{
	struct sss_adm_msg_elem_ctx *ctxt = priv_data;
	u64 resp_header;
	u8 resp_status;

	if (!SSS_TO_HWDEV(ctxt)->chip_present_flag) {
		pr_err("Fail to resp chip present");
		return SSS_PROCESS_ERR;
	}

	resp_header = be64_to_cpu(ctxt->reply_fmt->head);
	rmb(); /* read the latest header */
	/* NOTE(review): the barrier sits after the header load; presumably
	 * it is meant to order the header read before the payload read in
	 * copy_resp_data() — confirm before relying on it.
	 */

	if (SSSNIC_API_CMD_RESP_HEADER_VALID(resp_header)) {
		resp_status = SSSNIC_API_CMD_RESP_HEAD_GET(resp_header, STATUS);
		if (resp_status) {
			pr_err("Api chain response data err, status: %u\n",
			       resp_status);
			return SSS_PROCESS_ERR;
		}

		return SSS_PROCESS_OK;
	}

	return SSS_PROCESS_DOING;
}
/* Poll every 100 us (up to SSS_ADM_MSG_STATE_TIMEOUT) until the chain's
 * hardware ci catches up with pi.
 */
static int sss_wait_for_state_poll(struct sss_adm_msg *adm_msg)
{
	return sss_check_handler_timeout(adm_msg, sss_wait_for_state_poll_handler,
					 SSS_ADM_MSG_STATE_TIMEOUT, 100); /* wait 100 us once */
}

/* Poll for a valid response header on a read element. */
static int wait_for_resp_polling(struct sss_adm_msg_elem_ctx *ctx)
{
	return sss_check_handler_timeout(ctx, check_cmd_resp_handler,
					 POLLING_COMPLETION_TIMEOUT_DEFAULT,
					 USEC_PER_MSEC);
}

/* Copy the reply payload out of the element's write-back buffer and
 * mark the context free again.
 */
static void copy_resp_data(struct sss_adm_msg_elem_ctx *ctx, void *ack,
			   u16 ack_size)
{
	struct sss_adm_msg_reply_fmt *resp = ctx->reply_fmt;

	memcpy(ack, &resp->reply, ack_size);
	ctx->state = 0;
}
/* Wait for a posted request to finish according to its chain type:
 * reads poll the response header and copy the payload into @ack, sync
 * writes poll the consumer index, async writes return immediately.
 * Dumps the chain CSRs on timeout.  Returns 0 or a negative errno.
 */
static int sss_wait_for_adm_msg_completion(struct sss_adm_msg *adm_msg,
					   struct sss_adm_msg_elem_ctx *ctx,
					   void *ack, u16 ack_size)
{
	int ret = 0;

	switch (adm_msg->msg_type) {
	case SSS_ADM_MSG_POLL_READ:
		ret = wait_for_resp_polling(ctx);
		if (ret == 0)
			copy_resp_data(ctx, ack, ack_size);
		else
			sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "API CMD poll response timeout\n");
		break;
	case SSS_ADM_MSG_POLL_WRITE:
	case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE:
		ret = sss_wait_for_state_poll(adm_msg);
		break;
	case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE:
		/* No need to wait */
		break;
	default:
		sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Unknown API CMD Chain type: %d\n",
			adm_msg->msg_type);
		ret = -EINVAL;
	}

	if (ret) {
		sss_dump_adm_msg_reg(adm_msg);
		sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Adm msg wait timeout,type :%d\n",
			adm_msg->msg_type);
	}

	return ret;
}
/* Claim the element at the current pi: mark it in-flight, remember its
 * slot index, and clear the write-back header so a stale reply cannot
 * be mistaken for the new one.
 */
static inline void sss_update_adm_msg_ctx(struct sss_adm_msg *adm_msg)
{
	struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[adm_msg->pi];

	ctx->state = 1;
	ctx->store_pi = adm_msg->pi;
	if (ctx->reply_fmt) {
		ctx->reply_fmt->head = 0;

		/* make sure "header" was cleared */
		wmb();
	}
}

/* Serialize submissions: async chains use a spinlock, all others a
 * semaphore.
 */
static void sss_adm_msg_lock(struct sss_adm_msg *adm_msg)
{
	if (adm_msg->msg_type == SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE)
		spin_lock(&adm_msg->async_lock);
	else
		down(&adm_msg->sem);
}

/* Counterpart of sss_adm_msg_lock(). */
static void sss_adm_msg_unlock(struct sss_adm_msg *adm_msg)
{
	if (adm_msg->msg_type == SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE)
		spin_unlock(&adm_msg->async_lock);
	else
		up(&adm_msg->sem);
}
/* Core submit path shared by reads and writes: under the chain lock,
 * check for space, claim and fill the element, advance pi and kick the
 * hardware; then wait for completion outside the lock.  The ctx pointer
 * is captured at the pi used for this request before pi advances.
 */
static int sss_adm_msg_io(struct sss_adm_msg *adm_msg, u8 node_id,
			  const void *cmd, u16 cmd_size, void *ack, u16 ack_size)
{
	struct sss_adm_msg_elem_ctx *ctx = NULL;

	sss_adm_msg_lock(adm_msg);

	ctx = &adm_msg->elem_ctx[adm_msg->pi];
	if (sss_adm_msg_busy(adm_msg)) {
		sss_adm_msg_unlock(adm_msg);
		return -EBUSY;
	}

	sss_update_adm_msg_ctx(adm_msg);
	sss_prepare_elem(adm_msg, node_id, cmd, cmd_size);
	sss_adm_msg_increase_pi(adm_msg);

	/* make sure issue correctly the command */
	wmb();
	sss_issue_adm_msg(adm_msg);

	/* Move the software cursor on to the next ring element. */
	adm_msg->now_node = adm_msg->elem_ctx[adm_msg->pi].elem_vaddr;

	sss_adm_msg_unlock(adm_msg);

	return sss_wait_for_adm_msg_completion(adm_msg, ctx, ack, ack_size);
}

/* Post a write request; no reply payload is collected. */
int sss_adm_msg_write(struct sss_adm_msg *adm_msg, u8 node_id,
		      const void *cmd, u16 cmd_size)
{
	return sss_adm_msg_io(adm_msg, node_id, cmd, cmd_size, NULL, 0);
}

/* Post a read request; the reply is copied into @ack on success. */
int sss_adm_msg_read(struct sss_adm_msg *adm_msg, u8 node_id,
		     const void *cmd, u16 size, void *ack, u16 ack_size)
{
	return sss_adm_msg_io(adm_msg, node_id, cmd, size, ack, ack_size);
}
/* Record the state of the in-flight sync exchange (START/END/FAIL/
 * TIMEOUT) under the event lock.
 */
static void sss_set_adm_event_flag(struct sss_msg_pf_to_mgmt *pf_to_mgmt,
				   int event_flag)
{
	spin_lock(&pf_to_mgmt->sync_event_lock);
	pf_to_mgmt->event_state = event_flag;
	spin_unlock(&pf_to_mgmt->sync_event_lock);
}
/* Round a payload length up to the on-wire ADM message size: the
 * device-reserved prefix plus a u64 header plus the body, padded in
 * SSS_MGMT_MSG_SIZE_STEP increments with a floor of
 * SSS_MGMT_MSG_SIZE_MIN.
 */
static u16 sss_align_adm_msg_len(u16 msg_data_len)
{
	/* u64 - the size of the header */
	u16 raw_len = (u16)(SSS_MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len);

	if (raw_len <= SSS_MGMT_MSG_SIZE_MIN)
		return SSS_MGMT_MSG_SIZE_MIN;

	return SSS_MGMT_MSG_SIZE_MIN +
	       ALIGN(raw_len - SSS_MGMT_MSG_SIZE_MIN, SSS_MGMT_MSG_SIZE_STEP);
}
/* Lay out the ADM message buffer: a zeroed device-reserved prefix, the
 * 64-bit header, then the message body.
 */
static void sss_encapsulate_adm_msg(u8 *adm_msg, u64 *header,
				    const void *body, int body_len)
{
	u8 *adm_msg_new = adm_msg;

	memset(adm_msg_new, 0, SSS_MGMT_MSG_RSVD_FOR_DEV);
	adm_msg_new += SSS_MGMT_MSG_RSVD_FOR_DEV;

	memcpy(adm_msg_new, header, sizeof(*header));
	adm_msg_new += sizeof(*header);

	memcpy(adm_msg_new, body, (size_t)(u32)body_len);
}
#define SSS_MAX_PF_MGMT_BUF_MAX 2048L

/* Synchronous polling read to node @dest over the ADM channel.  The
 * arguments are validated, ADM support and chip presence are required,
 * and the request is issued on the POLL_READ chain.
 * Returns 0, -EINVAL on bad arguments or -EPERM when unavailable.
 */
int sss_adm_msg_read_ack(void *hwdev, u8 dest, const void *cmd,
			 u16 size, void *ack, u16 ack_size)
{
	struct sss_hwdev *dev = hwdev;
	struct sss_adm_msg *adm_msg = NULL;

	if (!dev || !cmd || (ack_size && !ack) || size > SSS_MAX_PF_MGMT_BUF_MAX)
		return -EINVAL;

	if (!SSS_SUPPORT_ADM_MSG(dev))
		return -EPERM;

	adm_msg = dev->pf_to_mgmt->adm_msg[SSS_ADM_MSG_POLL_READ];
	if (!dev->chip_present_flag)
		return -EPERM;

	return sss_adm_msg_read(adm_msg, dest, cmd, size, ack, ack_size);
}
/* Synchronous polling write to node @dest over the ADM channel; no
 * reply payload is expected.  Issued on the POLL_WRITE chain.
 * Returns 0, -EINVAL on bad arguments or -EPERM when unavailable.
 */
int sss_adm_msg_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size)
{
	struct sss_hwdev *dev = hwdev;
	struct sss_adm_msg *adm_msg = NULL;

	if (!dev || !size || !cmd || size > SSS_MAX_PF_MGMT_BUF_MAX)
		return -EINVAL;

	if (!SSS_SUPPORT_ADM_MSG(dev))
		return -EPERM;

	adm_msg = dev->pf_to_mgmt->adm_msg[SSS_ADM_MSG_POLL_WRITE];
	if (!dev->chip_present_flag)
		return -EPERM;

	return sss_adm_msg_write(adm_msg, dest, cmd, size);
}
#define SSS_MSG_NO_RESP 0xFFFF

/* Build one synchronous ADM message in the shared sync buffer and post
 * it on the WRITE_TO_MGMT_MODULE chain.  Allocates the next sync
 * message id for the request.  Returns 0 or a negative errno.
 */
static int sss_send_adm_msg(struct sss_msg_pf_to_mgmt *pf_to_mgmt,
			    u8 mod, u16 cmd, const void *msg_body, u16 msg_body_len)
{
	struct sss_hwif *hwif = SSS_TO_HWDEV(pf_to_mgmt)->hwif;
	void *msg_buf = pf_to_mgmt->sync_buf;
	u16 adm_msg_len = sss_align_adm_msg_len(msg_body_len);
	u32 func_id = SSS_GET_HWIF_GLOBAL_ID(hwif);
	u8 node_id = SSS_MGMT_CPU_NODE_ID(SSS_TO_HWDEV(pf_to_mgmt));
	u64 header;
	struct sss_adm_msg *adm_mag;

	if (sss_get_dev_present_flag(pf_to_mgmt->hwdev) == 0)
		return -EFAULT;

	if (adm_msg_len > SSS_MSG_TO_MGMT_LEN_MAX)
		return -EFAULT;

	sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_START);

	header = SSS_ENCAPSULATE_ADM_MSG_HEAD(func_id, msg_body_len, mod,
					      cmd, SSS_INCREASE_SYNC_MSG_ID(pf_to_mgmt));
	sss_encapsulate_adm_msg((u8 *)msg_buf, &header, msg_body, msg_body_len);

	adm_mag = pf_to_mgmt->adm_msg[SSS_ADM_MSG_WRITE_TO_MGMT_MODULE];
	return sss_adm_msg_write(adm_mag, node_id, msg_buf, adm_msg_len);
}

/* Clamp an out-of-range reply AEQ index in the message body to 0 for
 * the module types that carry a sss_msg_head.
 */
static inline void sss_check_msg_body(u8 mod, void *buf_in)
{
	struct sss_msg_head *msg_head = NULL;

	/* set aeq fix num to 3, need to ensure response aeq id < 3 */
	if (mod == SSS_MOD_TYPE_COMM || mod == SSS_MOD_TYPE_L2NIC) {
		msg_head = buf_in;

		if (msg_head->reply_aeq_num >= SSS_MAX_AEQ)
			msg_head->reply_aeq_num = 0;
	}
}
/* Send a synchronous management message and wait for the response to be
 * delivered through the AEQ receive path.  @timeout is in milliseconds
 * (0 selects SSS_MGMT_MSG_TIMEOUT).  The shared sync buffer is held
 * under sync_lock for the whole exchange; the response, if requested,
 * is copied into @buf_out with its length in *@out_size.
 * Returns 0 on success or a negative errno.
 */
int sss_sync_send_adm_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in,
			  u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
{
	struct sss_msg_pf_to_mgmt *pf_to_mgmt = NULL;
	void *dev = ((struct sss_hwdev *)hwdev)->dev_hdl;
	struct sss_recv_msg *recv_msg = NULL;
	struct completion *recv_done = NULL;
	ulong timeo;
	int err;
	ulong ret;

	if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev))
		return -EPERM;

	sss_check_msg_body(mod, buf_in);

	pf_to_mgmt = ((struct sss_hwdev *)hwdev)->pf_to_mgmt;

	/* Lock the sync_buf */
	down(&pf_to_mgmt->sync_lock);

	recv_msg = &pf_to_mgmt->recv_resp_msg;
	recv_done = &recv_msg->done;
	init_completion(recv_done);

	err = sss_send_adm_msg(pf_to_mgmt, mod, cmd, buf_in, in_size);
	if (err != 0) {
		sdk_err(dev, "Fail to send adm msg to mgmt, sync_msg_id: %u\n",
			pf_to_mgmt->sync_msg_id);
		sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_FAIL);
		goto unlock_sync_msg;
	}

	/* Wait for the receive path to complete recv_done. */
	timeo = msecs_to_jiffies(timeout ? timeout : SSS_MGMT_MSG_TIMEOUT);

	ret = wait_for_completion_timeout(recv_done, timeo);
	if (ret == 0) {
		sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %u\n",
			pf_to_mgmt->sync_msg_id);
		sss_dump_aeq_info((struct sss_hwdev *)hwdev);
		err = -ETIMEDOUT;
		sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_TIMEOUT);
		goto unlock_sync_msg;
	}

	/* Completion arrived, but still treat the exchange as timed out
	 * if the event state was already marked TIMEOUT.
	 */
	spin_lock(&pf_to_mgmt->sync_event_lock);
	if (pf_to_mgmt->event_state == SSS_ADM_EVENT_TIMEOUT) {
		spin_unlock(&pf_to_mgmt->sync_event_lock);
		err = -ETIMEDOUT;
		goto unlock_sync_msg;
	}
	spin_unlock(&pf_to_mgmt->sync_event_lock);

	sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_END);

	if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) {
		destroy_completion(recv_done);
		up(&pf_to_mgmt->sync_lock);
		return -ETIMEDOUT;
	}

	if (buf_out && out_size) {
		if (*out_size < recv_msg->buf_len) {
			sdk_err(dev,
				"Invalid resp msg len: %u out of range: %u, mod %d, cmd %u\n",
				recv_msg->buf_len, *out_size, mod, cmd);
			err = -EFAULT;
			goto unlock_sync_msg;
		}

		if (recv_msg->buf_len)
			memcpy(buf_out, recv_msg->buf, recv_msg->buf_len);

		*out_size = recv_msg->buf_len;
	}

unlock_sync_msg:
	destroy_completion(recv_done);
	up(&pf_to_mgmt->sync_lock);

	return err;
}
/* Install the receive callback for management-CPU messages of module
 * @mod_type; @data is handed back to @handler with every message.  The
 * data/handler pointers are stored before the "registered" bit is set.
 * Returns 0, -EFAULT on a bad argument, or -EINVAL when the PF-to-mgmt
 * context does not exist.
 */
int sss_register_mgmt_msg_handler(void *hwdev, u8 mod_type, void *data,
				  sss_mgmt_msg_handler_t handler)
{
	struct sss_msg_pf_to_mgmt *pf_to_mgmt = NULL;

	if (!hwdev || mod_type >= SSS_MOD_TYPE_HW_MAX)
		return -EFAULT;

	pf_to_mgmt = ((struct sss_hwdev *)hwdev)->pf_to_mgmt;
	if (!pf_to_mgmt)
		return -EINVAL;

	pf_to_mgmt->recv_data[mod_type] = data;
	pf_to_mgmt->recv_handler[mod_type] = handler;
	set_bit(SSS_CALLBACK_REG, &pf_to_mgmt->recv_handler_state[mod_type]);

	return 0;
}
EXPORT_SYMBOL(sss_register_mgmt_msg_handler);
/* Remove the receive callback for @mod_type.  The "registered" bit is
 * cleared first, then the function spins until any in-flight invocation
 * (CALLBACK_RUNNING) drains before dropping the data/handler pointers.
 */
void sss_unregister_mgmt_msg_handler(void *hwdev, u8 mod_type)
{
	struct sss_msg_pf_to_mgmt *mgmt_msg = NULL;

	if (!hwdev || mod_type >= SSS_MOD_TYPE_HW_MAX)
		return;

	mgmt_msg = ((struct sss_hwdev *)hwdev)->pf_to_mgmt;
	if (!mgmt_msg)
		return;

	clear_bit(SSS_CALLBACK_REG, &mgmt_msg->recv_handler_state[mod_type]);

	while (test_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod_type]))
		usleep_range(SSS_MSG_CB_USLEEP_MIN, SSS_MSG_CB_USLEEP_MAX);

	mgmt_msg->recv_data[mod_type] = NULL;
	mgmt_msg->recv_handler[mod_type] = NULL;
}
EXPORT_SYMBOL(sss_unregister_mgmt_msg_handler);

View File

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWIF_ADM_H
#define SSS_HWIF_ADM_H

#include <linux/types.h>

/* Polling read over the ADM channel; the reply is copied into @ack. */
int sss_adm_msg_read_ack(void *hwdev, u8 dest, const void *cmd,
			 u16 size, void *ack, u16 ack_size);

/* Polling write over the ADM channel; no reply payload is collected. */
int sss_adm_msg_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size);

/* Synchronous request/response to the management CPU.  @timeout is in
 * milliseconds; 0 selects the default.
 */
int sss_sync_send_adm_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in,
			  u16 in_size, void *buf_out, u16 *out_size, u32 timeout);

#endif

View File

@ -0,0 +1,79 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWIF_ADM_COMMON_H
#define SSS_HWIF_ADM_COMMON_H

/* AEQ index used for ADM replies (see SSS_ENCAPSULATE_ADM_MSG_HEAD). */
#define SSS_ADM_MSG_AEQ_ID 2

/* Private-data encodings carried in the element descriptor. */
#define SSS_WRITE_ADM_MSG_PRIV_DATA(id) (((u8)(id)) << 16)
#define SSS_READ_ADM_MSG_PRIV_DATA(id, token) ((((u32)(id)) << 16) + (token))

/* Ring-index wrap; assumes elem_num is a power of two. */
#define SSS_MASK_ID(adm_msg, id) \
	((id) & ((adm_msg)->elem_num - 1))

/* Express a byte size in 4-byte / 8-byte units, rounding up. */
#define SSS_SIZE_TO_4B(size) \
	(ALIGN((u32)(size), 4U) >> 2)

#define SSS_SIZE_TO_8B(size) \
	(ALIGN((u32)(size), 8U) >> 3)

/* ADM_STATUS_0 CSR: 0x0030+adm msg id*0x080 */
#define SSS_ADM_MSG_STATE_CI_MASK 0xFFFFFFU
#define SSS_ADM_MSG_STATE_CI_SHIFT 0

#define SSS_ADM_MSG_STATE_FSM_MASK 0xFU
#define SSS_ADM_MSG_STATE_FSM_SHIFT 24

#define SSS_ADM_MSG_STATE_CHKSUM_ERR_MASK 0x3U
#define SSS_ADM_MSG_STATE_CHKSUM_ERR_SHIFT 28

#define SSS_ADM_MSG_STATE_CPLD_ERR_MASK 0x1U
#define SSS_ADM_MSG_STATE_CPLD_ERR_SHIFT 30

#define SSS_GET_ADM_MSG_STATE(val, member) \
	(((val) >> SSS_ADM_MSG_STATE_##member##_SHIFT) & \
	SSS_ADM_MSG_STATE_##member##_MASK)

/* adm_msg_elem.desc structure */
#define SSS_ADM_MSG_DESC_SGL_TYPE_SHIFT 0
#define SSS_ADM_MSG_DESC_RD_WR_SHIFT 1
#define SSS_ADM_MSG_DESC_MGMT_BYPASS_SHIFT 2
#define SSS_ADM_MSG_DESC_REPLY_AEQE_EN_SHIFT 3
#define SSS_ADM_MSG_DESC_MSG_VALID_SHIFT 4
#define SSS_ADM_MSG_DESC_MSG_CHANNEL_SHIFT 6
#define SSS_ADM_MSG_DESC_PRIV_DATA_SHIFT 8
#define SSS_ADM_MSG_DESC_DEST_SHIFT 32
#define SSS_ADM_MSG_DESC_SIZE_SHIFT 40
#define SSS_ADM_MSG_DESC_XOR_CHKSUM_SHIFT 56

#define SSS_ADM_MSG_DESC_SGL_TYPE_MASK 0x1U
#define SSS_ADM_MSG_DESC_RD_WR_MASK 0x1U
#define SSS_ADM_MSG_DESC_MGMT_BYPASS_MASK 0x1U
#define SSS_ADM_MSG_DESC_REPLY_AEQE_EN_MASK 0x1U
#define SSS_ADM_MSG_DESC_MSG_VALID_MASK 0x3U
#define SSS_ADM_MSG_DESC_MSG_CHANNEL_MASK 0x3U
#define SSS_ADM_MSG_DESC_PRIV_DATA_MASK 0xFFFFFFU
#define SSS_ADM_MSG_DESC_DEST_MASK 0x1FU
#define SSS_ADM_MSG_DESC_SIZE_MASK 0x7FFU
#define SSS_ADM_MSG_DESC_XOR_CHKSUM_MASK 0xFFU

#define SSS_ADM_MSG_DESC_SET(val, member) \
	((((u64)(val)) & SSS_ADM_MSG_DESC_##member##_MASK) << \
	SSS_ADM_MSG_DESC_##member##_SHIFT)

/* adm_msg_elem structure */
#define SSS_ADM_MSG_ELEM_CTRL_ELEM_LEN_SHIFT 0
#define SSS_ADM_MSG_ELEM_CTRL_RD_DMA_ATTR_OFF_SHIFT 16
#define SSS_ADM_MSG_ELEM_CTRL_WR_DMA_ATTR_OFF_SHIFT 24
#define SSS_ADM_MSG_ELEM_CTRL_XOR_CHKSUM_SHIFT 56

#define SSS_ADM_MSG_ELEM_CTRL_ELEM_LEN_MASK 0x3FU
#define SSS_ADM_MSG_ELEM_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU
#define SSS_ADM_MSG_ELEM_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU
#define SSS_ADM_MSG_ELEM_CTRL_XOR_CHKSUM_MASK 0xFFU

#define SSS_ADM_MSG_ELEM_CTRL_SET(val, member) \
	((((u64)(val)) & SSS_ADM_MSG_ELEM_CTRL_##member##_MASK) << \
	SSS_ADM_MSG_ELEM_CTRL_##member##_SHIFT)

#endif

View File

@ -0,0 +1,763 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/semaphore.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_common.h"
#include "sss_hwdev.h"
#include "sss_csr.h"
#include "sss_hwif_api.h"
#include "sss_hwif_adm_common.h"
#include "sss_hwif_mgmt_common.h"
/* ADM_MSG_REQ CSR: 0x0020+adm_id*0x080 */
#define SSS_ADM_MSG_REQ_RESTART_SHIFT 1
#define SSS_ADM_MSG_REQ_WB_TRIGGER_SHIFT 2
#define SSS_ADM_MSG_REQ_RESTART_MASK 0x1U
#define SSS_ADM_MSG_REQ_WB_TRIGGER_MASK 0x1U
#define SSS_SET_ADM_MSG_REQ(val, member) \
(((val) & SSS_ADM_MSG_REQ_##member##_MASK) << \
SSS_ADM_MSG_REQ_##member##_SHIFT)
#define SSS_GET_ADM_MSG_REQ(val, member) \
(((val) >> SSS_ADM_MSG_REQ_##member##_SHIFT) & \
SSS_ADM_MSG_REQ_##member##_MASK)
#define SSS_CLEAR_ADM_MSG_REQ(val, member) \
((val) & (~(SSS_ADM_MSG_REQ_##member##_MASK \
<< SSS_ADM_MSG_REQ_##member##_SHIFT)))
/* ADM_MSG_CTRL CSR: 0x0014+adm_id*0x080 */
#define SSS_ADM_MSG_CTRL_RESTART_EN_SHIFT 1
#define SSS_ADM_MSG_CTRL_XOR_ERR_SHIFT 2
#define SSS_ADM_MSG_CTRL_AEQE_EN_SHIFT 4
#define SSS_ADM_MSG_CTRL_AEQ_ID_SHIFT 8
#define SSS_ADM_MSG_CTRL_XOR_CHK_EN_SHIFT 28
#define SSS_ADM_MSG_CTRL_ELEM_SIZE_SHIFT 30
#define SSS_ADM_MSG_CTRL_RESTART_EN_MASK 0x1U
#define SSS_ADM_MSG_CTRL_XOR_ERR_MASK 0x1U
#define SSS_ADM_MSG_CTRL_AEQE_EN_MASK 0x1U
#define SSS_ADM_MSG_CTRL_AEQ_ID_MASK 0x3U
#define SSS_ADM_MSG_CTRL_XOR_CHK_EN_MASK 0x3U
#define SSS_ADM_MSG_CTRL_ELEM_SIZE_MASK 0x3U
#define SSS_SET_ADM_MSG_CTRL(val, member) \
(((val) & SSS_ADM_MSG_CTRL_##member##_MASK) << \
SSS_ADM_MSG_CTRL_##member##_SHIFT)
#define SSS_CLEAR_ADM_MSG_CTRL(val, member) \
((val) & (~(SSS_ADM_MSG_CTRL_##member##_MASK \
<< SSS_ADM_MSG_CTRL_##member##_SHIFT)))
#define SSS_ADM_MSG_BUF_SIZE 2048ULL
#define SSS_ADM_MSG_NODE_ALIGN_SIZE 512ULL
#define SSS_ADM_MSG_PAYLOAD_ALIGN_SIZE 64ULL
#define SSS_ADM_MSG_REPLY_ALIGNMENT 128ULL
#define SSS_ADM_MSG_TIMEOUT 10000
#define SSS_ADM_MSG_ELEM_SIZE_SHIFT 6U
#define SSS_ADM_MSG_ELEM_NUM 32
#define SSS_ADM_MSG_ELEM_SIZE 128
#define SSS_ADM_MSG_REPLY_DATA_SIZE 128
#define SSS_MGMT_WQ_NAME "sssnic_mgmt"
#define SSS_GET_ADM_MSG_ELEM_PADDR(adm_msg, elem_id) \
((adm_msg)->elem_paddr_base + (adm_msg)->elem_size_align * (elem_id))
#define SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id) \
((adm_msg)->elem_vaddr_base + (adm_msg)->elem_size_align * (elem_id))
#define SSS_GET_ADM_MSG_BUF_PADDR(adm_msg, elem_id) \
((adm_msg)->buf_paddr_base + (adm_msg)->buf_size_align * (elem_id))
#define SSS_GET_ADM_MSG_BUF_VADDR(adm_msg, elem_id) \
((adm_msg)->buf_vaddr_base + (adm_msg)->buf_size_align * (elem_id))
#define SSS_GET_ADM_MSG_REPLY_PADDR(adm_msg, elem_id) \
((adm_msg)->reply_paddr_base + (adm_msg)->reply_size_align * (elem_id))
#define SSS_GET_ADM_MSG_REPLY_VADDR(adm_msg, elem_id) \
((adm_msg)->reply_vaddr_base + (adm_msg)->reply_size_align * (elem_id))
/* Per-queue-type helper that wires a ring element to its DMA buffers. */
typedef void (*sss_alloc_elem_buf_handler_t)(struct sss_adm_msg *adm_msg, u32 elem_id);

/* Creation parameters for one ADM message queue.
 * NOTE(review): not referenced anywhere in this file — verify whether any
 * other translation unit uses it.
 */
struct sss_adm_msg_attr {
	struct sss_hwdev *hwdev;
	enum sss_adm_msg_type msg_type;

	u32 elem_num;
	u16 reply_size;
	u16 elem_size;
};
static enum sss_process_ret sss_adm_msg_reset_handler(void *priv_data)
{
u32 val;
u32 addr;
struct sss_adm_msg *adm_msg = priv_data;
if (!SSS_TO_HWDEV(adm_msg)->chip_present_flag)
return SSS_PROCESS_ERR;
addr = SSS_CSR_ADM_MSG_REQ_ADDR(adm_msg->msg_type);
val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr);
if (!SSS_GET_ADM_MSG_REQ(val, RESTART))
return SSS_PROCESS_OK;
return SSS_PROCESS_DOING;
}
static enum sss_process_ret sss_adm_msg_ready_handler(void *priv_data)
{
u32 val;
u32 addr;
struct sss_adm_msg *adm_msg = priv_data;
if (!SSS_TO_HWDEV(adm_msg)->chip_present_flag)
return SSS_PROCESS_ERR;
addr = SSS_CSR_ADM_MSG_STATE_0_ADDR(adm_msg->msg_type);
val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr);
if (SSS_GET_ADM_MSG_STATE(val, CI) == adm_msg->ci)
return SSS_PROCESS_OK;
return SSS_PROCESS_DOING;
}
/* Zero the RESTART_EN, XOR_ERR, AEQE_EN, XOR_CHK_EN and ELEM_SIZE fields
 * of this queue's ADM_MSG_CTRL CSR via a single read-modify-write,
 * leaving all other fields untouched.
 */
static void sss_chip_clean_adm_msg(struct sss_adm_msg *adm_msg)
{
	u32 val;
	u32 addr = SSS_CSR_ADM_MSG_CTRL_ADDR(adm_msg->msg_type);

	val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr);
	/* AND the five clear masks so all fields drop in one write. */
	val = SSS_CLEAR_ADM_MSG_CTRL(val, RESTART_EN) &
	      SSS_CLEAR_ADM_MSG_CTRL(val, XOR_ERR) &
	      SSS_CLEAR_ADM_MSG_CTRL(val, AEQE_EN) &
	      SSS_CLEAR_ADM_MSG_CTRL(val, XOR_CHK_EN) &
	      SSS_CLEAR_ADM_MSG_CTRL(val, ELEM_SIZE);

	sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val);
}
static void sss_chip_set_adm_msg_wb_addr(struct sss_adm_msg *adm_msg)
{
u32 val;
u32 addr;
addr = SSS_CSR_ADM_MSG_STATE_HI_ADDR(adm_msg->msg_type);
val = upper_32_bits(adm_msg->wb_state_paddr);
sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val);
addr = SSS_CSR_ADM_MSG_STATE_LO_ADDR(adm_msg->msg_type);
val = lower_32_bits(adm_msg->wb_state_paddr);
sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val);
}
/* Kick the RESTART bit in the ADM_MSG_REQ CSR, then poll via
 * sss_adm_msg_reset_handler until hardware clears it or
 * SSS_ADM_MSG_TIMEOUT expires. Returns 0 on success.
 */
static int sss_chip_reset_adm_msg(struct sss_adm_msg *adm_msg)
{
	u32 val;
	u32 addr;

	addr = SSS_CSR_ADM_MSG_REQ_ADDR(adm_msg->msg_type);

	/* Clear then set RESTART so a stale value cannot mask the kick. */
	val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr);
	val = SSS_CLEAR_ADM_MSG_REQ(val, RESTART);
	val |= SSS_SET_ADM_MSG_REQ(1, RESTART);

	sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val);

	return sss_check_handler_timeout(adm_msg, sss_adm_msg_reset_handler,
					 SSS_ADM_MSG_TIMEOUT, USEC_PER_MSEC);
}
/* Program the ring element size into ADM_MSG_CTRL and keep AEQE
 * reporting disabled for this queue. The hardware encoding is
 * ilog2(elem_size / 64) per SSS_ADM_MSG_ELEM_SIZE_SHIFT.
 */
static void sss_chip_init_elem_size(struct sss_adm_msg *adm_msg)
{
	u32 val;
	u32 addr;
	u32 size;

	addr = SSS_CSR_ADM_MSG_CTRL_ADDR(adm_msg->msg_type);

	val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr);
	val = SSS_CLEAR_ADM_MSG_CTRL(val, AEQE_EN) &
	      SSS_CLEAR_ADM_MSG_CTRL(val, ELEM_SIZE);
	size = (u32)ilog2(adm_msg->elem_size >> SSS_ADM_MSG_ELEM_SIZE_SHIFT);
	val |= SSS_SET_ADM_MSG_CTRL(0, AEQE_EN) |
	       SSS_SET_ADM_MSG_CTRL(size, ELEM_SIZE);

	sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val);
}
/* Tell the hardware how many descriptors this queue's ring contains. */
static void sss_chip_set_elem_num(struct sss_adm_msg *adm_msg)
{
	sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif,
			   SSS_CSR_ADM_MSG_NUM_ELEM_ADDR(adm_msg->msg_type),
			   adm_msg->elem_num);
}
static void sss_chip_init_elem_head(struct sss_adm_msg *adm_msg)
{
u32 val;
u32 addr;
addr = SSS_CSR_ADM_MSG_HEAD_HI_ADDR(adm_msg->msg_type);
val = upper_32_bits(adm_msg->head_elem_paddr);
sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val);
addr = SSS_CSR_ADM_MSG_HEAD_LO_ADDR(adm_msg->msg_type);
val = lower_32_bits(adm_msg->head_elem_paddr);
sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val);
}
/* Block until the chip reports the queue ready (CI caught up), or
 * SSS_ADM_MSG_TIMEOUT expires. Returns 0 on success.
 */
static int sss_wait_adm_msg_ready(struct sss_adm_msg *adm_msg)
{
	return sss_check_handler_timeout(adm_msg, sss_adm_msg_ready_handler,
					 SSS_ADM_MSG_TIMEOUT, USEC_PER_MSEC);
}
/* Full hardware bring-up for one ADM queue: clear stale control bits,
 * program the write-back address, restart the queue engine, program
 * element size/count/head, then wait until the chip reports ready.
 * Returns 0 on success, -EBUSY if the restart kick times out, or the
 * ready-wait's error code.
 */
static int sss_chip_init_adm_msg(struct sss_adm_msg *adm_msg)
{
	sss_chip_clean_adm_msg(adm_msg);
	sss_chip_set_adm_msg_wb_addr(adm_msg);

	if (sss_chip_reset_adm_msg(adm_msg) != 0) {
		sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Fail to restart adm cmd\n");
		return -EBUSY;
	}

	sss_chip_init_elem_size(adm_msg);
	sss_chip_set_elem_num(adm_msg);
	sss_chip_init_elem_head(adm_msg);

	return sss_wait_adm_msg_ready(adm_msg);
}
/* Wire ring element @elem_id to its per-element command buffer: cache the
 * CPU address in the element context and store the DMA address in the
 * descriptor, big-endian as the hardware consumes it.
 */
static void sss_init_ctx_buf_addr(struct sss_adm_msg *adm_msg,
				  u32 elem_id)
{
	u64 paddr;
	void *vaddr;
	struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[elem_id];
	struct sss_adm_msg_elem *elem = NULL;

	vaddr = (u8 *)SSS_GET_ADM_MSG_BUF_VADDR(adm_msg, elem_id);
	paddr = SSS_GET_ADM_MSG_BUF_PADDR(adm_msg, elem_id);

	ctx->adm_msg_vaddr = vaddr;
	elem =
		(struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id);
	elem->write.hw_msg_paddr = cpu_to_be64(paddr);
}
/* Wire ring element @elem_id for a read-style queue: point the element's
 * write-back field at the reply buffer (big-endian DMA address) and make
 * the context's message pointer reference the in-element message slot.
 */
static void sss_init_ctx_reply_addr(struct sss_adm_msg *adm_msg,
				    u32 elem_id)
{
	u64 paddr;
	void *vaddr;
	struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[elem_id];
	struct sss_adm_msg_elem *elem = NULL;

	paddr = SSS_GET_ADM_MSG_REPLY_PADDR(adm_msg, elem_id);
	vaddr = (u8 *)SSS_GET_ADM_MSG_REPLY_VADDR(adm_msg, elem_id);

	elem =
		(struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id);
	elem->read.hw_wb_reply_paddr = cpu_to_be64(paddr);
	ctx->reply_fmt = vaddr;
	/* Command payload lives inside the element itself for this mode. */
	ctx->adm_msg_vaddr = &elem->read.hw_msg_paddr;
}
/* Wire ring element @elem_id with both a command buffer and a reply
 * buffer: the context keeps CPU pointers to each, and the element's read
 * descriptor gets the command buffer's DMA address in big-endian form.
 */
static void sss_init_ctx_buf_reply_addr(struct sss_adm_msg *adm_msg,
					u32 elem_id)
{
	u64 buf_paddr;
	void *buf_vaddr;
	void *rsp_vaddr;
	struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[elem_id];
	struct sss_adm_msg_elem *elem = NULL;

	rsp_vaddr = (u8 *)SSS_GET_ADM_MSG_REPLY_VADDR(adm_msg, elem_id);
	buf_paddr = SSS_GET_ADM_MSG_BUF_PADDR(adm_msg, elem_id);
	buf_vaddr = (u8 *)SSS_GET_ADM_MSG_BUF_VADDR(adm_msg, elem_id);

	elem =
		(struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id);
	ctx->reply_fmt = rsp_vaddr;
	ctx->adm_msg_vaddr = buf_vaddr;
	elem->read.hw_msg_paddr = cpu_to_be64(buf_paddr);
}
/* Attach the reply buffer for @cell_idx to @elem: cache its CPU address
 * in the element context and store its DMA address (big-endian) in the
 * element's write-back reply field. Despite the name, nothing is
 * allocated here — the buffer comes from the pre-carved region of
 * sss_alloc_elem_buf().
 */
static void sss_alloc_reply_buf(struct sss_adm_msg *adm_msg,
				struct sss_adm_msg_elem *elem, u32 cell_idx)
{
	struct sss_adm_msg_elem_ctx *ctx = NULL;
	void *resp_vaddr;
	u64 resp_paddr;

	resp_vaddr = (u8 *)((u64)adm_msg->reply_vaddr_base +
			    adm_msg->reply_size_align * cell_idx);
	resp_paddr = adm_msg->reply_paddr_base +
		     adm_msg->reply_size_align * cell_idx;

	ctx = &adm_msg->elem_ctx[cell_idx];

	ctx->reply_fmt = resp_vaddr;
	elem->read.hw_wb_reply_paddr = cpu_to_be64(resp_paddr);
}
/* Initialize the software context for ring element @elem_id and, based on
 * the queue type, wire up its command and/or reply buffers.
 * Returns 0 on success, -EINVAL for an unsupported queue type.
 */
static int sss_init_elem_ctx(struct sss_adm_msg *adm_msg, u32 elem_id)
{
	struct sss_adm_msg_elem_ctx *ctx = NULL;
	struct sss_adm_msg_elem *elem;
	/* Dispatch table indexed by msg_type. The table never changes, so
	 * build it once (static const) instead of on the stack every call.
	 */
	static const sss_alloc_elem_buf_handler_t handler[] = {
		NULL,
		NULL,
		sss_init_ctx_buf_addr,
		sss_init_ctx_reply_addr,
		sss_init_ctx_buf_addr,
		sss_init_ctx_buf_reply_addr,
		sss_init_ctx_buf_addr
	};

	elem = (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id);
	if (adm_msg->msg_type == SSS_ADM_MSG_MULTI_READ ||
	    adm_msg->msg_type == SSS_ADM_MSG_POLL_READ)
		sss_alloc_reply_buf(adm_msg, elem, elem_id);

	ctx = &adm_msg->elem_ctx[elem_id];
	ctx->elem_vaddr =
		(struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id);
	ctx->hwdev = adm_msg->hwdev;

	/* Single error path instead of two identical goto targets. */
	if (adm_msg->msg_type >= ARRAY_LEN(handler) ||
	    !handler[adm_msg->msg_type]) {
		sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Unsupport adm msg type %u\n",
			adm_msg->msg_type);
		return -EINVAL;
	}

	handler[adm_msg->msg_type](adm_msg, elem_id);

	return 0;
}
static int sss_init_adm_msg_elem(struct sss_adm_msg *adm_msg)
{
u32 i;
u64 paddr;
void *vaddr;
struct sss_adm_msg_elem *elem = NULL;
struct sss_adm_msg_elem *pre_elt = NULL;
int ret;
for (i = 0; i < adm_msg->elem_num; i++) {
ret = sss_init_elem_ctx(adm_msg, i);
if (ret != 0)
return ret;
paddr = SSS_GET_ADM_MSG_ELEM_PADDR(adm_msg, i);
vaddr = SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, i);
if (!pre_elt) {
adm_msg->head_node = vaddr;
adm_msg->head_elem_paddr = (dma_addr_t)paddr;
} else {
pre_elt->next_elem_paddr = cpu_to_be64(paddr);
}
elem = vaddr;
elem->next_elem_paddr = 0;
pre_elt = elem;
}
elem->next_elem_paddr = cpu_to_be64(adm_msg->head_elem_paddr);
adm_msg->now_node = adm_msg->head_node;
return 0;
}
/* Allocate the zeroed array of per-element software contexts. */
static int sss_alloc_adm_msg_ctx(struct sss_adm_msg *adm_msg)
{
	adm_msg->elem_ctx =
		kzalloc(adm_msg->elem_num * sizeof(*adm_msg->elem_ctx), GFP_KERNEL);

	return adm_msg->elem_ctx ? 0 : -ENOMEM;
}
/* Release the per-element context array; kfree(NULL) is a safe no-op. */
static void sss_free_adm_msg_ctx(struct sss_adm_msg *adm_msg)
{
	kfree(adm_msg->elem_ctx);
	adm_msg->elem_ctx = NULL;
}
/* Allocate the DMA-coherent write-back state area the chip updates with
 * queue progress. NOTE(review): dma_zalloc_coherent() is presumably a
 * compatibility wrapper provided by sss_kernel.h on newer kernels —
 * keep it for cross-kernel builds.
 */
static int sss_alloc_adm_msg_wb_state(struct sss_adm_msg *adm_msg)
{
	void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl;

	adm_msg->wb_state = dma_zalloc_coherent(dev_hdl, sizeof(*adm_msg->wb_state),
						&adm_msg->wb_state_paddr, GFP_KERNEL);
	if (!adm_msg->wb_state) {
		sdk_err(dev_hdl, "Fail to alloc dma wb status\n");
		return -ENOMEM;
	}

	return 0;
}
/* Free the DMA-coherent write-back state area allocated by
 * sss_alloc_adm_msg_wb_state().
 */
static void sss_free_adm_msg_wb_state(struct sss_adm_msg *adm_msg)
{
	void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl;

	dma_free_coherent(dev_hdl, sizeof(*adm_msg->wb_state),
			  adm_msg->wb_state, adm_msg->wb_state_paddr);
}
/* Carve one DMA-coherent allocation into three back-to-back per-element
 * arrays — [ring elements][reply buffers][command buffers] — with each
 * entry padded to its alignment requirement. The cached *_base addresses
 * feed the SSS_GET_ADM_MSG_{ELEM,REPLY,BUF}_{PADDR,VADDR} accessors.
 * Returns 0 on success or the allocator's negative errno.
 */
static int sss_alloc_elem_buf(struct sss_adm_msg *adm_msg)
{
	int ret;
	size_t buf_size;
	void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl;

	adm_msg->buf_size_align = ALIGN(SSS_ADM_MSG_BUF_SIZE,
					SSS_ADM_MSG_PAYLOAD_ALIGN_SIZE);
	adm_msg->elem_size_align = ALIGN((u64)adm_msg->elem_size,
					 SSS_ADM_MSG_NODE_ALIGN_SIZE);
	adm_msg->reply_size_align = ALIGN((u64)adm_msg->reply_size,
					  SSS_ADM_MSG_REPLY_ALIGNMENT);
	buf_size = (adm_msg->buf_size_align + adm_msg->elem_size_align +
		    adm_msg->reply_size_align) * adm_msg->elem_num;

	ret = sss_dma_zalloc_coherent_align(dev_hdl, buf_size, SSS_ADM_MSG_NODE_ALIGN_SIZE,
					    GFP_KERNEL, &adm_msg->elem_addr);
	if (ret != 0) {
		sdk_err(dev_hdl, "Fail to alloc adm msg elem buffer\n");
		return ret;
	}

	/* Region layout: elements first, then replies, then buffers. */
	adm_msg->elem_vaddr_base = adm_msg->elem_addr.align_vaddr;
	adm_msg->elem_paddr_base = adm_msg->elem_addr.align_paddr;

	adm_msg->reply_vaddr_base = (u8 *)((u64)adm_msg->elem_vaddr_base +
					   adm_msg->elem_size_align * adm_msg->elem_num);
	adm_msg->reply_paddr_base = adm_msg->elem_paddr_base +
				    adm_msg->elem_size_align * adm_msg->elem_num;

	adm_msg->buf_vaddr_base = (u8 *)((u64)adm_msg->reply_vaddr_base +
					 adm_msg->reply_size_align * adm_msg->elem_num);
	adm_msg->buf_paddr_base = adm_msg->reply_paddr_base +
				  adm_msg->reply_size_align * adm_msg->elem_num;

	return 0;
}
/* Release the single DMA region carved up by sss_alloc_elem_buf(). */
static void sss_free_elem_buf(struct sss_adm_msg *adm_msg)
{
	void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl;

	sss_dma_free_coherent_align(dev_hdl, &adm_msg->elem_addr);
}
/* Acquire all memory one ADM queue needs, in order: element contexts,
 * write-back state, then the DMA element/reply/buffer region; unwind in
 * reverse on any failure. Returns 0 or a negative errno.
 */
static int sss_alloc_adm_msg_buf(struct sss_adm_msg *adm_msg)
{
	int ret;

	ret = sss_alloc_adm_msg_ctx(adm_msg);
	if (ret != 0)
		return ret;

	ret = sss_alloc_adm_msg_wb_state(adm_msg);
	if (ret != 0)
		goto free_ctx;

	ret = sss_alloc_elem_buf(adm_msg);
	if (ret != 0)
		goto free_wb_state;

	return 0;

free_wb_state:
	sss_free_adm_msg_wb_state(adm_msg);
free_ctx:
	sss_free_adm_msg_ctx(adm_msg);

	return ret;
}
/* Tear down everything sss_alloc_adm_msg_buf() acquired, in reverse
 * order of allocation.
 */
static void sss_free_adm_msg_buf(struct sss_adm_msg *adm_msg)
{
	sss_free_elem_buf(adm_msg);

	sss_free_adm_msg_wb_state(adm_msg);

	sss_free_adm_msg_ctx(adm_msg);
}
/* Fill in the software defaults for a freshly zeroed queue struct.
 * Async write queues are serialized by a spinlock (callers may not
 * sleep); all other queue types use a semaphore.
 */
static void sss_init_adm_msg_param(struct sss_adm_msg *adm_msg,
				   struct sss_hwdev *hwdev, u8 msg_type)
{
	adm_msg->hwdev = hwdev;
	adm_msg->elem_num = SSS_ADM_MSG_ELEM_NUM;
	adm_msg->reply_size = SSS_ADM_MSG_REPLY_DATA_SIZE;
	adm_msg->elem_size = SSS_ADM_MSG_ELEM_SIZE;
	adm_msg->msg_type = msg_type;
	adm_msg->pi = 0;
	adm_msg->ci = 0;
	if (adm_msg->msg_type == SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE)
		spin_lock_init(&adm_msg->async_lock);
	else
		sema_init(&adm_msg->sem, 1);
}
/* Allocate and fully initialize one ADM message queue of @msg_type; on
 * success *adm_msg receives the new queue. All partially acquired
 * resources are released on failure — including the queue struct itself,
 * which the previous version leaked on every error path.
 * Returns 0 or a negative errno.
 */
static int create_adm_msg(struct sss_hwdev *hwdev, struct sss_adm_msg **adm_msg, u8 msg_type)
{
	struct sss_adm_msg *msg;
	int ret;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	sss_init_adm_msg_param(msg, hwdev, msg_type);

	ret = sss_alloc_adm_msg_buf(msg);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init adm msg buf\n");
		goto free_msg;	/* fix: msg was previously leaked here */
	}

	ret = sss_init_adm_msg_elem(msg);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init adm msg elem\n");
		goto free_buf;
	}

	ret = sss_chip_init_adm_msg(msg);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init adm msg\n");
		goto free_buf;
	}

	*adm_msg = msg;

	return 0;

free_buf:
	sss_free_adm_msg_buf(msg);
free_msg:
	kfree(msg);

	return ret;
}
/* Free one ADM message queue and all its buffers. Tolerates a NULL
 * @adm_msg so error-path callers can pass slots that were never created.
 */
void sss_destroy_adm_msg(struct sss_adm_msg *adm_msg)
{
	if (!adm_msg)
		return;

	sss_free_adm_msg_buf(adm_msg);
	kfree(adm_msg);
}
/* Create every ADM queue type in @adm_msg[]. On failure, destroy exactly
 * the queues created so far and propagate the error; a no-op 0 when the
 * device does not support ADM messaging.
 */
static int sss_init_adm_msg(struct sss_hwdev *hwdev,
			    struct sss_adm_msg **adm_msg)
{
	int ret;
	u8 i;
	u8 adm_msg_type;
	void *dev = hwdev->dev_hdl;	/* hwdev is already a sss_hwdev * */

	if (!SSS_SUPPORT_ADM_MSG(hwdev))
		return 0;

	for (adm_msg_type = SSS_ADM_MSG_WRITE_TO_MGMT_MODULE;
	     adm_msg_type < SSS_ADM_MSG_MAX; adm_msg_type++) {
		ret = create_adm_msg(hwdev, &adm_msg[adm_msg_type], adm_msg_type);
		if (ret) {
			sdk_err(dev, "Failed to create adm msg %d\n", adm_msg_type);
			goto create_adm_msg_err;
		}
	}

	return 0;

create_adm_msg_err:
	/* Fix: destroy the queues created so far (adm_msg[i]); the old code
	 * repeatedly destroyed the same never-created slot
	 * hwdev->pf_to_mgmt->adm_msg[adm_msg_type].
	 */
	for (i = SSS_ADM_MSG_WRITE_TO_MGMT_MODULE; i < adm_msg_type; i++)
		sss_destroy_adm_msg(adm_msg[i]);

	return ret;
}
/* Destroy every ADM queue type; a no-op when the device never supported
 * ADM messaging (in which case none were created).
 */
static void sss_deinit_adm_msg(const struct sss_hwdev *hwdev,
			       struct sss_adm_msg **adm_msg)
{
	u8 qtype;

	if (!SSS_SUPPORT_ADM_MSG(hwdev))
		return;

	for (qtype = SSS_ADM_MSG_WRITE_TO_MGMT_MODULE;
	     qtype < SSS_ADM_MSG_MAX; qtype++)
		sss_destroy_adm_msg(adm_msg[qtype]);
}
/* Allocate the five fixed-size management message buffers (recv, resp,
 * ack, sync, async) and mark both receive messages as having no partial
 * message in progress (seq_id = SSS_MGMT_SEQ_ID_MAX). Unwinds all
 * earlier allocations on failure. Returns 0 or -ENOMEM.
 */
static int sss_alloc_msg_buf(struct sss_msg_pf_to_mgmt *mgmt_msg)
{
	struct sss_recv_msg *recv_msg = &mgmt_msg->recv_msg;
	struct sss_recv_msg *resp_msg = &mgmt_msg->recv_resp_msg;

	recv_msg->seq_id = SSS_MGMT_SEQ_ID_MAX;
	resp_msg->seq_id = SSS_MGMT_SEQ_ID_MAX;

	recv_msg->buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL);
	if (!recv_msg->buf)
		return -ENOMEM;

	resp_msg->buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL);
	if (!resp_msg->buf)
		goto alloc_resp_msg_err;

	mgmt_msg->ack_buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL);
	if (!mgmt_msg->ack_buf)
		goto alloc_ack_buf_err;

	mgmt_msg->sync_buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL);
	if (!mgmt_msg->sync_buf)
		goto alloc_sync_buf_err;

	mgmt_msg->async_msg_buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL);
	if (!mgmt_msg->async_msg_buf)
		goto alloc_async_msg_buf_err;

	return 0;

/* Unwind in reverse order; pointers are NULLed so a later
 * sss_free_msg_buf() cannot double-free.
 */
alloc_async_msg_buf_err:
	kfree(mgmt_msg->sync_buf);
	mgmt_msg->sync_buf = NULL;

alloc_sync_buf_err:
	kfree(mgmt_msg->ack_buf);
	mgmt_msg->ack_buf = NULL;

alloc_ack_buf_err:
	kfree(resp_msg->buf);
	resp_msg->buf = NULL;

alloc_resp_msg_err:
	kfree(recv_msg->buf);
	recv_msg->buf = NULL;

	return -ENOMEM;
}
/* Release the five management message buffers in reverse order of
 * allocation; kfree(NULL) is a safe no-op.
 */
static void sss_free_msg_buf(struct sss_msg_pf_to_mgmt *mgmt_msg)
{
	kfree(mgmt_msg->async_msg_buf);
	kfree(mgmt_msg->sync_buf);
	kfree(mgmt_msg->ack_buf);
	kfree(mgmt_msg->recv_resp_msg.buf);
	kfree(mgmt_msg->recv_msg.buf);
}
/* Top-level init of the PF-to-management channel: allocate the tracking
 * struct, its locks, a dedicated single-threaded workqueue, the message
 * buffers, and every ADM hardware queue. On failure everything is
 * unwound and hwdev->pf_to_mgmt is left NULL. Returns 0 or a negative
 * errno.
 */
int sss_hwif_init_adm(struct sss_hwdev *hwdev)
{
	int ret;
	struct sss_msg_pf_to_mgmt *mgmt_msg;

	mgmt_msg = kzalloc(sizeof(*mgmt_msg), GFP_KERNEL);
	if (!mgmt_msg)
		return -ENOMEM;

	spin_lock_init(&mgmt_msg->async_msg_lock);
	spin_lock_init(&mgmt_msg->sync_event_lock);
	sema_init(&mgmt_msg->sync_lock, 1);
	mgmt_msg->hwdev = hwdev;
	/* Published before the workqueue exists; callers must not use the
	 * channel until this function returns 0.
	 */
	hwdev->pf_to_mgmt = mgmt_msg;

	mgmt_msg->workq = create_singlethread_workqueue(SSS_MGMT_WQ_NAME);
	if (!mgmt_msg->workq) {
		sdk_err(hwdev->dev_hdl, "Fail to init mgmt workq\n");
		ret = -ENOMEM;
		goto alloc_mgmt_wq_err;
	}

	ret = sss_alloc_msg_buf(mgmt_msg);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to alloc msg buffer\n");
		goto alloc_msg_buf_err;
	}

	ret = sss_init_adm_msg(hwdev, mgmt_msg->adm_msg);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init adm msg\n");
		goto init_all_adm_err;
	}

	return 0;

init_all_adm_err:
	sss_free_msg_buf(mgmt_msg);

alloc_msg_buf_err:
	destroy_workqueue(mgmt_msg->workq);

alloc_mgmt_wq_err:
	kfree(mgmt_msg);
	hwdev->pf_to_mgmt = NULL;

	return ret;
}
/* Tear down the PF-to-management channel created by sss_hwif_init_adm():
 * stop the workqueue first so no handler races the queue destruction,
 * then free the ADM queues, buffers, and the tracking struct.
 */
void sss_hwif_deinit_adm(struct sss_hwdev *hwdev)
{
	struct sss_msg_pf_to_mgmt *mgmt_msg = hwdev->pf_to_mgmt;

	destroy_workqueue(mgmt_msg->workq);

	sss_deinit_adm_msg(hwdev, mgmt_msg->adm_msg);

	sss_free_msg_buf(mgmt_msg);

	kfree(mgmt_msg);
	hwdev->pf_to_mgmt = NULL;
}
void sss_complete_adm_event(struct sss_hwdev *hwdev)
{
struct sss_recv_msg *recv_msg =
&hwdev->pf_to_mgmt->recv_resp_msg;
spin_lock_bh(&hwdev->pf_to_mgmt->sync_event_lock);
if (hwdev->pf_to_mgmt->event_state == SSS_ADM_EVENT_START) {
complete(&recv_msg->done);
hwdev->pf_to_mgmt->event_state = SSS_ADM_EVENT_TIMEOUT;
}
spin_unlock_bh(&hwdev->pf_to_mgmt->sync_event_lock);
}

View File

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

/* Lifecycle entry points for the PF-to-management ADM channel. */
#ifndef SSS_HWIF_ADM_INIT_H
#define SSS_HWIF_ADM_INIT_H

#include "sss_hwdev.h"

/* Create the channel (queues, buffers, workqueue); 0 or negative errno. */
int sss_hwif_init_adm(struct sss_hwdev *hwdev);
/* Destroy the channel and NULL out hwdev->pf_to_mgmt. */
void sss_hwif_deinit_adm(struct sss_hwdev *hwdev);
/* Complete a pending synchronous exchange (e.g. on response arrival). */
void sss_complete_adm_event(struct sss_hwdev *hwdev);

#endif

View File

@ -0,0 +1,568 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include "sss_kernel.h"
#include "sss_hwdev.h"
#include "sss_eq_info.h"
#include "sss_hw_svc_cap.h"
#include "sss_hw_irq.h"
#include "sss_hw_aeq.h"
#include "sss_hw_export.h"
#include "sss_hwif_aeq.h"
#include "sss_hw_common.h"
#include "sss_hwif_eq.h"
#include "sss_hwif_api.h"
#include "sss_hwif_export.h"
#include "sss_csr.h"
#define SSS_DEF_AEQ_DEPTH 0x10000
#define SSS_MIN_AEQ_DEPTH 64
#define SSS_MAX_AEQ_DEPTH \
((SSS_MAX_EQ_PAGE_SIZE / SSS_AEQE_SIZE) * SSS_AEQ_MAX_PAGE)
#define SSS_AEQE_DESC_SIZE 4
#define SSS_AEQE_DATA_SIZE (SSS_AEQE_SIZE - SSS_AEQE_DESC_SIZE)
/* Layout of one AEQ entry: event payload first, then the descriptor word
 * (type/size/source/wrap flags). Both are written by hardware;
 * the descriptor is stored big-endian (see be32_to_cpu at the readers).
 */
struct sss_aeq_elem {
	u8 aeqe_data[SSS_AEQE_DATA_SIZE];
	u32 desc;
};
#define SSS_GET_AEQ_ELEM(aeq, id) \
((struct sss_aeq_elem *)SSS_GET_EQ_ELEM((aeq), (id)))
#define SSS_GET_CUR_AEQ_ELEM(aeq) SSS_GET_AEQ_ELEM((aeq), (aeq)->ci)
#define SSS_GET_AEQ_SW_EVENT(type) \
(((type) >= SSS_ERR_MAX) ? \
SSS_STF_EVENT : SSS_STL_EVENT)
#define SSS_AEQ_CTRL_0_INTR_ID_SHIFT 0
#define SSS_AEQ_CTRL_0_DMA_ATTR_SHIFT 12
#define SSS_AEQ_CTRL_0_PCI_INTF_ID_SHIFT 20
#define SSS_AEQ_CTRL_0_INTR_MODE_SHIFT 31
#define SSS_AEQ_CTRL_0_INTR_ID_MASK 0x3FFU
#define SSS_AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU
#define SSS_AEQ_CTRL_0_PCI_INTF_ID_MASK 0x7U
#define SSS_AEQ_CTRL_0_INTR_MODE_MASK 0x1U
#define SSS_SET_AEQ_CTRL_0(val, member) \
(((val) & SSS_AEQ_CTRL_0_##member##_MASK) << \
SSS_AEQ_CTRL_0_##member##_SHIFT)
#define SSS_CLEAR_AEQ_CTRL_0(val, member) \
((val) & (~(SSS_AEQ_CTRL_0_##member##_MASK << \
SSS_AEQ_CTRL_0_##member##_SHIFT)))
#define SSS_AEQ_CTRL_1_SIZE_SHIFT 0
#define SSS_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24
#define SSS_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28
#define SSS_AEQ_CTRL_1_SIZE_MASK 0x1FFFFFU
#define SSS_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U
#define SSS_AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU
#define SSS_SET_AEQ_CTRL_1(val, member) \
(((val) & SSS_AEQ_CTRL_1_##member##_MASK) << \
SSS_AEQ_CTRL_1_##member##_SHIFT)
#define SSS_CLEAR_AEQ_CTRL_1(val, member) \
((val) & (~(SSS_AEQ_CTRL_1_##member##_MASK << \
SSS_AEQ_CTRL_1_##member##_SHIFT)))
#define SSS_ELEM_SIZE_IN_32B(aeq) (((aeq)->entry_size) >> 5)
#define SSS_SET_EQ_HW_E_SIZE(aeq) ((u32)ilog2(SSS_ELEM_SIZE_IN_32B(aeq)))
#define SSS_AEQ_WQ_NAME "sss_eqs"
#define SSS_AEQ_NAME "sss_aeq"
#define SSS_AEQ_TO_INFO(eq) \
container_of((eq) - (eq)->qid, struct sss_aeq_info, aeq[0])
#define SSS_AEQ_DMA_ATTR_DEF 0
enum sss_aeq_cb_state {
SSS_AEQ_HW_CB_REG = 0,
SSS_AEQ_HW_CB_RUNNING,
SSS_AEQ_SW_CB_REG,
SSS_AEQ_SW_CB_RUNNING,
};
static u32 aeq_depth = SSS_DEF_AEQ_DEPTH;
module_param(aeq_depth, uint, 0444);
MODULE_PARM_DESC(aeq_depth,
"aeq depth, valid range is " __stringify(SSS_MIN_AEQ_DEPTH)
" - " __stringify(SSS_MAX_AEQ_DEPTH));
/* Program AEQ interrupt routing in CTRL_0 via read-modify-write:
 * MSI-X entry, DMA attribute, owning PCI interface, and armed interrupt
 * mode.
 */
static void sss_chip_set_aeq_intr(struct sss_eq *aeq)
{
	u32 val;
	struct sss_hwif *hwif = SSS_TO_HWDEV(aeq)->hwif;

	val = sss_chip_read_reg(hwif, SSS_CSR_AEQ_CTRL_0_ADDR);

	val = SSS_CLEAR_AEQ_CTRL_0(val, INTR_ID) &
	      SSS_CLEAR_AEQ_CTRL_0(val, DMA_ATTR) &
	      SSS_CLEAR_AEQ_CTRL_0(val, PCI_INTF_ID) &
	      SSS_CLEAR_AEQ_CTRL_0(val, INTR_MODE);

	val |= SSS_SET_AEQ_CTRL_0(SSS_EQ_IRQ_ID(aeq), INTR_ID) |
	       SSS_SET_AEQ_CTRL_0(SSS_AEQ_DMA_ATTR_DEF, DMA_ATTR) |
	       SSS_SET_AEQ_CTRL_0(SSS_GET_HWIF_PCI_INTF_ID(hwif), PCI_INTF_ID) |
	       SSS_SET_AEQ_CTRL_0(SSS_INTR_MODE_ARMED, INTR_MODE);

	sss_chip_write_reg(hwif, SSS_CSR_AEQ_CTRL_0_ADDR, val);
}
/* Rewrite CTRL_1 entirely: queue depth, hardware-encoded element size,
 * and page size.
 */
static void sss_chip_set_aeq_size(struct sss_eq *aeq)
{
	struct sss_hwif *hwif = SSS_TO_HWDEV(aeq)->hwif;
	u32 ctrl1;

	ctrl1 = SSS_SET_AEQ_CTRL_1(aeq->len, SIZE) |
		SSS_SET_AEQ_CTRL_1(SSS_SET_EQ_HW_E_SIZE(aeq), ELEM_SIZE) |
		SSS_SET_AEQ_CTRL_1(SSS_SET_EQ_HW_PAGE_SIZE(aeq), PAGE_SIZE);

	sss_chip_write_reg(hwif, SSS_CSR_AEQ_CTRL_1_ADDR, ctrl1);
}
/* init_attr_handler hook for the generic EQ code: program interrupt
 * routing and sizing for an AEQ. Always succeeds (returns 0).
 */
static u32 sss_chip_init_aeq_attr(void *aeq)
{
	struct sss_eq *eq = aeq;

	sss_chip_set_aeq_intr(eq);
	sss_chip_set_aeq_size(eq);

	return 0;
}
/* init_desc_handler hook: seed every entry's descriptor with the current
 * wrap flag (big-endian) so software can tell hardware-written entries
 * from stale ones.
 */
static void sss_init_aeqe_desc(void *data)
{
	u32 i;
	u32 init_val;
	struct sss_aeq_elem *aeqe = NULL;
	struct sss_eq *aeq = (struct sss_eq *)data;

	init_val = cpu_to_be32(SSS_EQ_WRAPPED(aeq));
	for (i = 0; i < aeq->len; i++) {
		aeqe = SSS_GET_AEQ_ELEM(aeq, i);
		aeqe->desc = init_val;
	}

	/* write all aeq desc */
	wmb();	/* descriptors must be visible before the queue is enabled */
}
/* Hard-IRQ handler for an AEQ vector: clear the MSI-X resend bit and
 * defer all event processing to the bound workqueue.
 */
static irqreturn_t sss_aeq_intr_handle(int irq, void *data)
{
	struct sss_eq *aeq = (struct sss_eq *)data;
	struct sss_aeq_info *aeq_info = SSS_AEQ_TO_INFO(aeq);

	sss_chip_clear_msix_resend_bit(aeq->hwdev, SSS_EQ_IRQ_ID(aeq),
				       SSS_EQ_MSIX_RESEND_TIMER_CLEAR);

	queue_work_on(WORK_CPU_UNBOUND, aeq_info->workq, &aeq->aeq_work);

	return IRQ_HANDLED;
}
/* Dispatch one AEQ entry to its registered handler. The SRC bit of the
 * descriptor selects the software-event path (STF/STL) versus the
 * hardware-event path; either way the payload is byte-swapped to CPU
 * order first. The RUNNING bit brackets the callback so unregister can
 * wait for in-flight handlers; the REG bit is re-checked under RUNNING
 * to avoid calling a handler being torn down.
 */
static void sss_aeq_event_handle(struct sss_eq *aeq, u32 desc)
{
	u32 size;
	u32 event;
	u8 data[SSS_AEQE_DATA_SIZE];
	enum sss_aeq_hw_event hw_event;
	enum sss_aeq_sw_event sw_event;
	struct sss_aeq_info *aeq_info = SSS_AEQ_TO_INFO(aeq);
	struct sss_aeq_elem *aeqe;

	aeqe = SSS_GET_CUR_AEQ_ELEM(aeq);
	hw_event = SSS_GET_EQE_DESC(desc, TYPE);
	SSS_TO_HWDEV(aeq)->aeq_stat.cur_recv_cnt++;

	if (SSS_GET_EQE_DESC(desc, SRC)) {
		/* Software-sourced event: map the type to STF/STL. */
		event = hw_event;
		sw_event = SSS_GET_AEQ_SW_EVENT(event);

		memcpy(data, aeqe->aeqe_data, SSS_AEQE_DATA_SIZE);
		sss_be32_to_cpu(data, SSS_AEQE_DATA_SIZE);
		set_bit(SSS_AEQ_SW_CB_RUNNING, &aeq_info->sw_event_handler_state[sw_event]);

		if (aeq_info->sw_event_handler[sw_event] &&
		    test_bit(SSS_AEQ_SW_CB_REG, &aeq_info->sw_event_handler_state[sw_event]))
			aeq_info->sw_event_handler[sw_event](aeq_info->sw_event_data[sw_event],
							     hw_event, data);

		clear_bit(SSS_AEQ_SW_CB_RUNNING, &aeq_info->sw_event_handler_state[sw_event]);

		return;
	}

	if (hw_event < SSS_AEQ_EVENT_MAX) {
		memcpy(data, aeqe->aeqe_data, SSS_AEQE_DATA_SIZE);
		sss_be32_to_cpu(data, SSS_AEQE_DATA_SIZE);

		size = SSS_GET_EQE_DESC(desc, SIZE);
		set_bit(SSS_AEQ_HW_CB_RUNNING, &aeq_info->hw_event_handler_state[hw_event]);

		if (aeq_info->hw_event_handler[hw_event] &&
		    test_bit(SSS_AEQ_HW_CB_REG, &aeq_info->hw_event_handler_state[hw_event]))
			aeq_info->hw_event_handler[hw_event](aeq_info->hw_event_data[hw_event],
							     data, size);

		clear_bit(SSS_AEQ_HW_CB_RUNNING, &aeq_info->hw_event_handler_state[hw_event]);

		return;
	}
	sdk_warn(SSS_TO_HWDEV(aeq)->dev_hdl, "Unknown aeq event %d\n", hw_event);
}
/* Drain up to SSS_TASK_PROCESS_EQE_LIMIT entries from the AEQ.
 * Returns false when the queue is empty (wrap bit matches software's),
 * true when the budget ran out and more entries may remain, so the
 * caller should reschedule. The CI doorbell is rung every
 * SSS_EQ_UPDATE_CI_STEP entries to give hardware room.
 */
static bool sss_aeq_irq_handle(struct sss_eq *aeq)
{
	struct sss_aeq_elem *elem = NULL;
	u32 desc;
	u32 i;
	u32 eqe_cnt = 0;

	for (i = 0; i < SSS_TASK_PROCESS_EQE_LIMIT; i++) {
		elem = SSS_GET_CUR_AEQ_ELEM(aeq);

		/* Data in HW is in Big endian Format */
		desc = be32_to_cpu(elem->desc);

		/* HW updates wrap bit, when it adds eq element event */
		if (SSS_GET_EQE_DESC(desc, WRAPPED) == aeq->wrap)
			return false;

		/* Order the wrap-bit read before reading the payload. */
		dma_rmb();

		sss_aeq_event_handle(aeq, desc);

		sss_increase_eq_ci(aeq);

		if (++eqe_cnt >= SSS_EQ_UPDATE_CI_STEP) {
			eqe_cnt = 0;
			sss_chip_set_eq_ci(aeq, SSS_EQ_NOT_ARMED);
		}
	}

	return true;
}
/* Workqueue body for AEQ draining: process a budget of entries, update
 * the consumer index, and re-arm the interrupt only when the queue was
 * fully drained; otherwise requeue ourselves.
 */
static void sss_aeq_irq_work(struct work_struct *work)
{
	struct sss_eq *aeq = container_of(work, struct sss_eq, aeq_work);
	struct sss_aeq_info *info = SSS_AEQ_TO_INFO(aeq);
	bool more = sss_aeq_irq_handle(aeq);

	sss_chip_set_eq_ci(aeq, SSS_EQ_ARM_STATE(more));

	if (more)
		queue_work_on(WORK_CPU_UNBOUND, info->workq, &aeq->aeq_work);
}
/* Fill the generic EQ descriptor for AEQ @qid: hook up the AEQ-specific
 * init/attr/irq handlers and the drain work item, and set depth and
 * entry size from the module defaults.
 */
static void sss_init_aeq_para(struct sss_eq *aeq, u16 qid)
{
	aeq->init_desc_handler = sss_init_aeqe_desc;
	aeq->init_attr_handler = sss_chip_init_aeq_attr;
	aeq->irq_handler = sss_aeq_intr_handle;
	aeq->name = SSS_AEQ_NAME;
	INIT_WORK(&aeq->aeq_work, sss_aeq_irq_work);

	aeq->qid = qid;
	aeq->len = aeq_depth;
	aeq->type = SSS_AEQ;
	aeq->entry_size = SSS_AEQE_SIZE;
}
/* Create @aeq_num async event queues: allocate the tracking struct and a
 * reclaim-capable workqueue, clamp the module-parameter depth into its
 * valid range, init each queue with its MSI-X descriptor, then enable
 * all vectors. Unwinds fully on failure. Returns 0 or a negative errno.
 */
static int sss_init_aeq(struct sss_hwdev *hwdev,
			u16 aeq_num, struct sss_irq_desc *irq)
{
	u16 i;
	u16 qid;
	int ret;
	struct sss_aeq_info *aeq_info = NULL;

	aeq_info = kzalloc(sizeof(*aeq_info), GFP_KERNEL);
	if (!aeq_info)
		return -ENOMEM;

	hwdev->aeq_info = aeq_info;
	aeq_info->hwdev = hwdev;
	aeq_info->num = aeq_num;

	aeq_info->workq = alloc_workqueue(SSS_AEQ_WQ_NAME, WQ_MEM_RECLAIM, SSS_MAX_AEQ);
	if (!aeq_info->workq) {
		ret = -ENOMEM;
		sdk_err(hwdev->dev_hdl, "Fail to alloc aeq workqueue\n");
		goto alloc_workq_err;
	}

	/* Sanitize the module parameter before the first queue uses it. */
	if (aeq_depth < SSS_MIN_AEQ_DEPTH || aeq_depth > SSS_MAX_AEQ_DEPTH) {
		sdk_warn(hwdev->dev_hdl, "Invalid aeq_depth value %u, adjust to %d\n",
			 aeq_depth, SSS_DEF_AEQ_DEPTH);
		aeq_depth = SSS_DEF_AEQ_DEPTH;
	}

	for (qid = 0; qid < aeq_num; qid++) {
		sss_init_aeq_para(&aeq_info->aeq[qid], qid);
		ret = sss_init_eq(hwdev, &aeq_info->aeq[qid], &irq[qid]);
		if (ret != 0) {
			sdk_err(hwdev->dev_hdl, "Fail to init aeq %u\n", qid);
			goto init_aeq_err;
		}
	}

	/* Vectors are unmasked only after every queue is ready. */
	for (qid = 0; qid < aeq_num; qid++)
		sss_chip_set_msix_state(hwdev, irq[qid].msix_id, SSS_MSIX_ENABLE);

	return 0;

init_aeq_err:
	for (i = 0; i < qid; i++)
		sss_deinit_eq(&aeq_info->aeq[i]);

	destroy_workqueue(aeq_info->workq);

alloc_workq_err:
	kfree(aeq_info);
	hwdev->aeq_info = NULL;

	return ret;
}
void sss_deinit_aeq(struct sss_hwdev *hwdev)
{
struct sss_aeq_info *aeq_info = hwdev->aeq_info;
enum sss_aeq_hw_event aeq_event;
enum sss_aeq_sw_event sw_aeq_event;
u16 qid;
for (qid = 0; qid < aeq_info->num; qid++)
sss_deinit_eq(&aeq_info->aeq[qid]);
for (sw_aeq_event = SSS_STL_EVENT;
sw_aeq_event < SSS_AEQ_SW_EVENT_MAX; sw_aeq_event++)
sss_aeq_unregister_swe_cb(hwdev, sw_aeq_event);
for (aeq_event = SSS_HW_FROM_INT;
aeq_event < SSS_AEQ_EVENT_MAX; aeq_event++)
sss_aeq_unregister_hw_cb(hwdev, aeq_event);
destroy_workqueue(aeq_info->workq);
kfree(aeq_info);
hwdev->aeq_info = NULL;
}
/* Copy out the irq/MSI-X descriptor pair of every AEQ into @irq_array
 * and report the queue count through @irq_num.
 */
void sss_get_aeq_irq(struct sss_hwdev *hwdev,
		     struct sss_irq_desc *irq_array, u16 *irq_num)
{
	struct sss_aeq_info *info = hwdev->aeq_info;
	u16 i;

	for (i = 0; i < info->num; i++) {
		irq_array[i].irq_id = info->aeq[i].irq_desc.irq_id;
		irq_array[i].msix_id = info->aeq[i].irq_desc.msix_id;
	}

	*irq_num = info->num;
}
/* Diagnostic dump: for every AEQ, select it via the indirect-access
 * register and log CTRL0, CI, PI, the current descriptor and the work
 * state; then dump the chip-level error registers.
 */
void sss_dump_aeq_info(struct sss_hwdev *hwdev)
{
	struct sss_aeq_elem *aeqe = NULL;
	struct sss_eq *aeq = NULL;
	u32 addr;
	u32 ci;
	u32 pi;
	u32 ctrl0;
	u32 id;
	int qid;

	for (qid = 0; qid < hwdev->aeq_info->num; qid++) {
		aeq = &hwdev->aeq_info->aeq[qid];
		/* Indirect access should set qid first */
		sss_chip_write_reg(SSS_TO_HWDEV(aeq)->hwif,
				   SSS_EQ_INDIR_ID_ADDR(aeq->type), aeq->qid);
		wmb(); /* make sure set qid firstly */

		addr = SSS_CSR_AEQ_CTRL_0_ADDR;
		ctrl0 = sss_chip_read_reg(hwdev->hwif, addr);
		id = sss_chip_read_reg(hwdev->hwif, SSS_EQ_INDIR_ID_ADDR(aeq->type));

		addr = SSS_EQ_CI_REG_ADDR(aeq);
		ci = sss_chip_read_reg(hwdev->hwif, addr);
		addr = SSS_EQ_PI_REG_ADDR(aeq);
		pi = sss_chip_read_reg(hwdev->hwif, addr);
		aeqe = SSS_GET_CUR_AEQ_ELEM(aeq);
		sdk_err(hwdev->dev_hdl,
			"Aeq id: %d, id: %u, ctrl0: 0x%08x, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %u, desc: 0x%x swci:0x%x\n",
			qid, id, ctrl0, ci, pi, work_busy(&aeq->aeq_work),
			aeq->wrap, be32_to_cpu(aeqe->desc), aeq->ci);
	}

	sss_dump_chip_err_info(hwdev);
}
/* Register @event_handler for hardware AEQ event @event. The handler and
 * its private data are stored before the REG bit is published, so the
 * dispatch path never sees a half-registered callback.
 * Returns 0, or -EINVAL on bad arguments.
 */
int sss_aeq_register_hw_cb(void *hwdev, void *pri_handle,
			   enum sss_aeq_hw_event event, sss_aeq_hw_event_handler_t event_handler)
{
	struct sss_aeq_info *info;

	if (!hwdev || !event_handler || event >= SSS_AEQ_EVENT_MAX)
		return -EINVAL;

	info = SSS_TO_AEQ_INFO(hwdev);
	info->hw_event_handler[event] = event_handler;
	info->hw_event_data[event] = pri_handle;
	set_bit(SSS_AEQ_HW_CB_REG, &info->hw_event_handler_state[event]);

	return 0;
}
/* Unregister a hardware AEQ event handler.
 *
 * Clears the REG bit first so no new invocation starts, then spins
 * (sleeping) until any in-flight handler (RUNNING bit) has finished
 * before dropping the function pointer.  Must be called from a context
 * that may sleep.
 */
void sss_aeq_unregister_hw_cb(void *hwdev, enum sss_aeq_hw_event event)
{
	struct sss_aeq_info *aeq_info = NULL;

	if (!hwdev || event >= SSS_AEQ_EVENT_MAX)
		return;

	aeq_info = SSS_TO_AEQ_INFO(hwdev);

	clear_bit(SSS_AEQ_HW_CB_REG, &aeq_info->hw_event_handler_state[event]);

	while (test_bit(SSS_AEQ_HW_CB_RUNNING, &aeq_info->hw_event_handler_state[event]))
		usleep_range(SSS_EQ_USLEEP_LOW_LIMIT, SSS_EQ_USLEEP_HIG_LIMIT);

	aeq_info->hw_event_handler[event] = NULL;
}
/* Register a handler for one software AEQ event.
 *
 * @pri_handle: opaque context passed back to @sw_event_handler
 * Return: 0 on success, -EINVAL on bad arguments.
 */
int sss_aeq_register_swe_cb(void *hwdev, void *pri_handle,
			    enum sss_aeq_sw_event event,
			    sss_aeq_sw_event_handler_t sw_event_handler)
{
	struct sss_aeq_info *info;

	if (!hwdev || !sw_event_handler || event >= SSS_AEQ_SW_EVENT_MAX)
		return -EINVAL;

	info = SSS_TO_AEQ_INFO(hwdev);
	info->sw_event_data[event] = pri_handle;
	info->sw_event_handler[event] = sw_event_handler;
	/* publish the registration last */
	set_bit(SSS_AEQ_SW_CB_REG, &info->sw_event_handler_state[event]);

	return 0;
}
/* Unregister a software AEQ event handler.
 *
 * Same synchronization scheme as sss_aeq_unregister_hw_cb(): clear the
 * REG bit, wait for any running handler to drain, then NULL the pointer.
 * May sleep.
 */
void sss_aeq_unregister_swe_cb(void *hwdev, enum sss_aeq_sw_event event)
{
	struct sss_aeq_info *aeq_info = NULL;

	if (!hwdev || event >= SSS_AEQ_SW_EVENT_MAX)
		return;

	aeq_info = SSS_TO_AEQ_INFO(hwdev);

	clear_bit(SSS_AEQ_SW_CB_REG, &aeq_info->sw_event_handler_state[event]);

	while (test_bit(SSS_AEQ_SW_CB_RUNNING,
			&aeq_info->sw_event_handler_state[event]))
		usleep_range(SSS_EQ_USLEEP_LOW_LIMIT, SSS_EQ_USLEEP_HIG_LIMIT);

	aeq_info->sw_event_handler[event] = NULL;
}
/* Allocate interrupt vectors and initialize all AEQs.
 *
 * The AEQ count comes from the HW interface attributes and is clamped
 * to SSS_MAX_AEQ and to the number of irqs actually granted.
 * Return: 0 on success, -ENOMEM if no irq could be allocated, or the
 * error from sss_init_aeq() (allocated irqs are released on that path).
 */
int sss_hwif_init_aeq(struct sss_hwdev *hwdev)
{
	u16 i;
	u16 aeq_num;
	u16 act_num = 0;
	int ret;
	struct sss_irq_desc irq_array[SSS_MAX_AEQ] = {0};

	aeq_num = SSS_GET_HWIF_AEQ_NUM(hwdev->hwif);
	if (aeq_num > SSS_MAX_AEQ) {
		sdk_warn(hwdev->dev_hdl, "Adjust aeq_num to %d\n", SSS_MAX_AEQ);
		aeq_num = SSS_MAX_AEQ;
	}

	act_num = sss_alloc_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_array, aeq_num);
	if (act_num == 0) {
		sdk_err(hwdev->dev_hdl, "Fail to alloc irq, aeq_num: %u\n", aeq_num);
		return -ENOMEM;
	}

	/* accept a partial grant: shrink the queue count to what we got */
	if (act_num < aeq_num) {
		sdk_warn(hwdev->dev_hdl, "Adjust aeq_num to %u\n", act_num);
		aeq_num = act_num;
	}

	ret = sss_init_aeq(hwdev, aeq_num, irq_array);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init aeq\n");
		goto init_aeqs_err;
	}

	return 0;

init_aeqs_err:
	/* aeq_num == act_num here, so this frees every allocated irq */
	for (i = 0; i < aeq_num; i++)
		sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_array[i].irq_id);

	return ret;
}
/* Tear down all AEQs and free their interrupt vectors.
 *
 * Snapshots the irq descriptors before sss_deinit_aeq() frees the
 * aeq_info they live in, then releases each irq.
 */
void sss_hwif_deinit_aeq(struct sss_hwdev *hwdev)
{
	u16 i;
	/* Initialize defensively, mirroring sss_hwif_deinit_ceq(); the
	 * original left irq_num uninitialized before passing it out.
	 */
	u16 irq_num = 0;
	struct sss_irq_desc irq_array[SSS_MAX_AEQ] = {0};

	sss_get_aeq_irq(hwdev, irq_array, &irq_num);

	sss_deinit_aeq(hwdev);

	for (i = 0; i < irq_num; i++)
		sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_array[i].irq_id);
}
/* Program the default interrupt-coalescing attributes for every AEQ msix.
 *
 * Iterates the queues in reverse order; the order presumably does not
 * matter functionally (each queue is configured independently) — TODO
 * confirm against firmware expectations.
 * Return: 0 on success, -EFAULT on the first chip-config failure.
 */
int sss_init_aeq_msix_attr(struct sss_hwdev *hwdev)
{
	int i;
	int ret;
	struct sss_aeq_info *aeq_info = hwdev->aeq_info;
	struct sss_irq_cfg intr_info = {0};

	sss_init_eq_intr_info(&intr_info);

	for (i = aeq_info->num - 1; i >= 0; i--) {
		intr_info.msix_id = SSS_EQ_IRQ_ID(&aeq_info->aeq[i]);
		ret = sss_chip_set_eq_msix_attr(hwdev, &intr_info, SSS_CHANNEL_COMM);
		if (ret != 0) {
			sdk_err(hwdev->dev_hdl, "Fail to set msix attr for aeq %d\n", i);
			return -EFAULT;
		}
	}

	return 0;
}
/* Default software AEQ element handler: log the ucode event and count it.
 *
 * @data: event payload; read as a u64 for logging — assumes the buffer
 *        is at least 8 bytes and suitably aligned (TODO confirm with
 *        the AEQ element layout).
 * Return: always 0 (no error reported back to the dispatcher).
 */
u8 sss_sw_aeqe_handler(void *dev, u8 aeq_event, u8 *data)
{
	struct sss_hwdev *hwdev = (struct sss_hwdev *)dev;

	if (!hwdev)
		return 0;

	sdk_err(hwdev->dev_hdl, "Received ucode aeq event, type: 0x%x, data: 0x%llx\n",
		aeq_event, *((u64 *)data));

	/* only events within the stats array range are counted */
	if (aeq_event < SSS_ERR_MAX)
		atomic_inc(&hwdev->hw_stats.nic_ucode_event_stats[aeq_event]);

	return 0;
}

View File

@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

/* Async event queue (AEQ) interface: queue lifecycle, irq reporting,
 * and hardware/software event-callback registration.
 */
#ifndef SSS_HWIF_AEQ_H
#define SSS_HWIF_AEQ_H

#include "sss_hw_irq.h"
#include "sss_hw_aeq.h"
#include "sss_hwdev.h"
#include "sss_aeq_info.h"

/* queue teardown / irq snapshot / debug dump */
void sss_deinit_aeq(struct sss_hwdev *hwdev);
void sss_get_aeq_irq(struct sss_hwdev *hwdev,
		     struct sss_irq_desc *irq_array, u16 *irq_num);
void sss_dump_aeq_info(struct sss_hwdev *hwdev);

/* hardware-event callback registration (per sss_aeq_hw_event) */
int sss_aeq_register_hw_cb(void *hwdev, void *pri_handle,
			   enum sss_aeq_hw_event event, sss_aeq_hw_event_handler_t event_handler);
void sss_aeq_unregister_hw_cb(void *hwdev, enum sss_aeq_hw_event event);

/* software-event callback registration (per sss_aeq_sw_event) */
int sss_aeq_register_swe_cb(void *hwdev, void *pri_handle,
			    enum sss_aeq_sw_event event,
			    sss_aeq_sw_event_handler_t sw_event_handler);
void sss_aeq_unregister_swe_cb(void *hwdev, enum sss_aeq_sw_event event);

/* full init/deinit including irq allocation, plus msix attr setup */
int sss_hwif_init_aeq(struct sss_hwdev *hwdev);
void sss_hwif_deinit_aeq(struct sss_hwdev *hwdev);
int sss_init_aeq_msix_attr(struct sss_hwdev *hwdev);
u8 sss_sw_aeqe_handler(void *dev, u8 aeq_event, u8 *data);

#endif

View File

@ -0,0 +1,293 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/module.h>

#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_csr.h"
#include "sss_hwdev.h"
#include "sss_hwif_api.h"
#include "sss_hwif_export.h"

/* A register number encodes both a BAR selector (flag bits) and an
 * offset; these split the two parts apart.
 */
#define SSS_GET_REG_FLAG(reg)		((reg) & (~(SSS_CSR_FLAG_MASK)))
#define SSS_GET_REG_ADDR(reg)		((reg) & (SSS_CSR_FLAG_MASK))

/* byte size -> hardware page-size code: log2(pg_size / 4K) */
#define SSS_PAGE_SIZE_HW(pg_size)	((u8)ilog2((u32)((pg_size) >> 12)))

/* one status bit per slave host in the multi-host status register */
#define SSS_CLEAR_SLAVE_HOST_STATUS(host_id, val)	((val) & (~(1U << (host_id))))
#define SSS_SET_SLAVE_HOST_STATUS(host_id, enable)	(((u8)(enable) & 1U) << (host_id))

#define SSS_MULT_HOST_SLAVE_STATUS_ADDR		(SSS_MGMT_FLAG + 0xDF30)
/* Read a 32-bit CSR.
 *
 * Registers tagged with SSS_MGMT_FLAG live behind the management BAR
 * mapping, all others behind the config BAR mapping.  Register values
 * are big-endian on the wire, hence the be32_to_cpu().
 */
u32 sss_chip_read_reg(struct sss_hwif *hwif, u32 reg)
{
	u32 offset = SSS_GET_REG_ADDR(reg);

	if (SSS_GET_REG_FLAG(reg) == SSS_MGMT_FLAG)
		return be32_to_cpu(readl(hwif->mgmt_reg_base + offset));

	return be32_to_cpu(readl(hwif->cfg_reg_base + offset));
}
/* Write a 32-bit CSR.
 *
 * Mirror of sss_chip_read_reg(): picks the BAR mapping from the
 * register's flag bits and byte-swaps to the device's big-endian
 * layout before writing.
 */
void sss_chip_write_reg(struct sss_hwif *hwif, u32 reg, u32 val)
{
	u32 offset = SSS_GET_REG_ADDR(reg);
	u32 raw = cpu_to_be32(val);

	if (SSS_GET_REG_FLAG(reg) == SSS_MGMT_FLAG)
		writel(raw, hwif->mgmt_reg_base + offset);
	else
		writel(raw, hwif->cfg_reg_base + offset);
}
/* Check whether the device is still reachable over PCIe.
 *
 * A read of HW_ATTR1 returning all-ones (SSS_PCIE_LINK_DOWN) means the
 * read went to a missing/surprise-removed device.
 * Return: true if present, false if the card dropped off the bus.
 */
bool sss_chip_get_present_state(void *hwdev)
{
	u32 val;

	val = sss_chip_read_reg(SSS_TO_HWIF(hwdev), SSS_CSR_HW_ATTR1_ADDR);
	if (val == SSS_PCIE_LINK_DOWN) {
		sdk_warn(SSS_TO_DEV(hwdev), "Card is not present\n");
		return false;
	}

	return true;
}
/* Query PCIe link/management state.
 *
 * Return: SSS_PCIE_LINK_DOWN (0xFFFFFFFF) when hwdev is NULL or the
 * register reads back all-ones; otherwise the inverted MGMT_INIT_STATUS
 * bit, so SSS_PCIE_LINK_UP (0) when management init has completed.
 */
u32 sss_chip_get_pcie_link_status(void *hwdev)
{
	u32 val;

	if (!hwdev)
		return SSS_PCIE_LINK_DOWN;

	val = sss_chip_read_reg(SSS_TO_HWIF(hwdev), SSS_CSR_HW_ATTR1_ADDR);
	if (val == SSS_PCIE_LINK_DOWN)
		return val;

	return !SSS_GET_AF1(val, MGMT_INIT_STATUS);
}
/* Publish the PF driver state in the HW_ATTR6 status field.
 *
 * No-op for VFs — only physical functions own this field.
 * Read-modify-write of the PF_STATUS bits only.
 */
void sss_chip_set_pf_status(struct sss_hwif *hwif,
			    enum sss_pf_status status)
{
	u32 val;

	if (SSS_GET_HWIF_FUNC_TYPE(hwif) == SSS_FUNC_TYPE_VF)
		return;

	val = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR6_ADDR);
	val = SSS_CLEAR_AF6(val, PF_STATUS);
	val |= SSS_SET_AF6(status, PF_STATUS);

	sss_chip_write_reg(hwif, SSS_CSR_HW_ATTR6_ADDR, val);
}
/* Read back the PF status field published via sss_chip_set_pf_status(). */
enum sss_pf_status sss_chip_get_pf_status(struct sss_hwif *hwif)
{
	u32 val = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR6_ADDR);

	return SSS_GET_AF6(val, PF_STATUS);
}
/* Enable the doorbell path: set DOORBELL_CTRL in HW_ATTR4 to DB_ENABLE
 * with a read-modify-write that leaves other attribute bits intact.
 */
void sss_chip_enable_doorbell(struct sss_hwif *hwif)
{
	u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR4_ADDR);

	attr = SSS_CLEAR_AF4(attr, DOORBELL_CTRL);
	attr |= SSS_SET_AF4(DB_ENABLE, DOORBELL_CTRL);

	sss_chip_write_reg(hwif, SSS_CSR_HW_ATTR4_ADDR, attr);
}
/* Disable the doorbell path: set DOORBELL_CTRL in HW_ATTR4 to
 * DB_DISABLE, preserving the other attribute bits.
 */
void sss_chip_disable_doorbell(struct sss_hwif *hwif)
{
	u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR4_ADDR);

	attr = SSS_CLEAR_AF4(attr, DOORBELL_CTRL);
	attr |= SSS_SET_AF4(DB_DISABLE, DOORBELL_CTRL);

	sss_chip_write_reg(hwif, SSS_CSR_HW_ATTR4_ADDR, attr);
}
/* Return a doorbell page id to the pool.
 *
 * Out-of-range ids are silently ignored; clearing is done under the
 * pool spinlock.
 */
void sss_free_db_id(struct sss_hwif *hwif, u32 id)
{
	struct sss_db_pool *db_pool = &hwif->db_pool;

	if (id < db_pool->bit_size) {
		spin_lock(&db_pool->id_lock);
		clear_bit((int)id, db_pool->bitmap);
		spin_unlock(&db_pool->id_lock);
	}
}
/* Allocate a doorbell page id from the pool bitmap.
 *
 * @id: set to the allocated id on success; untouched on failure.
 * Return: 0 on success, -ENOMEM when the pool is exhausted.
 */
int sss_alloc_db_id(struct sss_hwif *hwif, u32 *id)
{
	struct sss_db_pool *db_pool = &hwif->db_pool;
	u32 bit;
	int ret = 0;

	spin_lock(&db_pool->id_lock);
	bit = (u32)find_first_zero_bit(db_pool->bitmap, db_pool->bit_size);
	if (bit < db_pool->bit_size) {
		set_bit(bit, db_pool->bitmap);
		*id = bit;
	} else {
		ret = -ENOMEM;
	}
	spin_unlock(&db_pool->id_lock);

	return ret;
}
/* Dump chip-level fatal error / health registers to the kernel log.
 *
 * VFs cannot access these registers, so this is a no-op for them.
 * Debug/error-path helper only.
 */
void sss_dump_chip_err_info(struct sss_hwdev *hwdev)
{
	u32 value;

	if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF)
		return;

	value = sss_chip_read_reg(hwdev->hwif, SSS_CHIP_BASE_INFO_ADDR);
	sdk_warn(hwdev->dev_hdl, "Chip base info: 0x%08x\n", value);

	value = sss_chip_read_reg(hwdev->hwif, SSS_MGMT_HEALTH_STATUS_ADDR);
	sdk_warn(hwdev->dev_hdl, "Mgmt CPU health status: 0x%08x\n", value);

	value = sss_chip_read_reg(hwdev->hwif, SSS_CHIP_ERR_STATUS0_ADDR);
	sdk_warn(hwdev->dev_hdl, "Chip fatal error status0: 0x%08x\n", value);
	value = sss_chip_read_reg(hwdev->hwif, SSS_CHIP_ERR_STATUS1_ADDR);
	sdk_warn(hwdev->dev_hdl, "Chip fatal error status1: 0x%08x\n", value);

	value = sss_chip_read_reg(hwdev->hwif, SSS_ERR_INFO0_ADDR);
	sdk_warn(hwdev->dev_hdl, "Chip exception info0: 0x%08x\n", value);
	value = sss_chip_read_reg(hwdev->hwif, SSS_ERR_INFO1_ADDR);
	sdk_warn(hwdev->dev_hdl, "Chip exception info1: 0x%08x\n", value);
	value = sss_chip_read_reg(hwdev->hwif, SSS_ERR_INFO2_ADDR);
	sdk_warn(hwdev->dev_hdl, "Chip exception info2: 0x%08x\n", value);
}
/* Read the elected PPF function id for a given host.
 *
 * Return: the PPF id from the per-host election register, or 0 when
 * hwdev is NULL (0 is also a valid id — callers must ensure hwdev).
 */
u8 sss_chip_get_host_ppf_id(struct sss_hwdev *hwdev, u8 host_id)
{
	u32 addr;
	u32 val;

	if (!hwdev)
		return 0;

	addr = SSS_CSR_FUNC_PPF_ELECT(host_id);
	val = sss_chip_read_reg(hwdev->hwif, addr);

	return SSS_GET_PPF_ELECT_PORT(val, ID);
}
/* Fill a SET msix-config management command from an irq_cfg.
 *
 * Pure field translation: copies coalescing/LLI/resend parameters from
 * @info into the wire-format command and stamps the caller's global
 * function id.
 */
static void sss_init_eq_msix_cfg(void *hwdev,
				 struct sss_cmd_msix_config *cmd_msix,
				 struct sss_irq_cfg *info)
{
	cmd_msix->opcode = SSS_MGMT_MSG_SET_CMD;
	cmd_msix->func_id = sss_get_global_func_id(hwdev);
	cmd_msix->msix_index = (u16)info->msix_id;
	cmd_msix->lli_credit_cnt = info->lli_credit;
	cmd_msix->lli_timer_cnt = info->lli_timer;
	cmd_msix->pending_cnt = info->pending;
	cmd_msix->coalesce_timer_cnt = info->coalesc_timer;
	cmd_msix->resend_timer_cnt = info->resend_timer;
}
/* Send the msix coalescing attributes for one EQ vector to management.
 *
 * @ch: management channel to use (e.g. SSS_CHANNEL_COMM).
 * The command buffer doubles as the response buffer.
 * Return: 0 on success, -EINVAL if the send failed or the firmware
 * reported a bad status / short response.
 */
int sss_chip_set_eq_msix_attr(void *hwdev,
			      struct sss_irq_cfg *intr_info, u16 ch)
{
	int ret;
	struct sss_cmd_msix_config cmd_msix = {0};
	u16 out_len = sizeof(cmd_msix);

	sss_init_eq_msix_cfg(hwdev, &cmd_msix, intr_info);

	ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_CFG_MSIX_CTRL_REG,
				   &cmd_msix, sizeof(cmd_msix), &cmd_msix, &out_len, ch);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_msix)) {
		sdk_err(SSS_TO_DEV(hwdev),
			"Fail to set eq msix cfg, ret: %d, status: 0x%x, out_len: 0x%x, ch: 0x%x\n",
			ret, cmd_msix.head.state, out_len, ch);
		return -EINVAL;
	}

	return 0;
}
/* Configure the work-queue page size of a function via management cmd.
 *
 * @page_size: page size in bytes; converted to the hardware encoding
 *             (log2 of multiples of 4K) by SSS_PAGE_SIZE_HW().
 * Return: 0 on success, -EFAULT on send failure or bad firmware status.
 */
int sss_chip_set_wq_page_size(void *hwdev, u16 func_id, u32 page_size)
{
	int ret;
	struct sss_cmd_wq_page_size cmd_page = {0};
	u16 out_len = sizeof(cmd_page);

	cmd_page.opcode = SSS_MGMT_MSG_SET_CMD;
	cmd_page.func_id = func_id;
	cmd_page.page_size = SSS_PAGE_SIZE_HW(page_size);

	ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_CFG_PAGESIZE,
				&cmd_page, sizeof(cmd_page), &cmd_page, &out_len);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_page)) {
		sdk_err(SSS_TO_DEV(hwdev),
			"Fail to set wq page size, ret: %d, status: 0x%x, out_len: 0x%0x\n",
			ret, cmd_page.head.state, out_len);
		return -EFAULT;
	}

	return 0;
}
/* Program the two CEQ control registers of one queue via management cmd.
 *
 * @attr0/@attr1: raw ctrl0/ctrl1 register values built by the caller
 *                (see sss_chip_init_ceq_attr() in sss_hwif_ceq.c).
 * Return: 0 on success, -EFAULT on send failure or bad firmware status.
 */
int sss_chip_set_ceq_attr(struct sss_hwdev *hwdev, u16 qid,
			  u32 attr0, u32 attr1)
{
	int ret;
	struct sss_cmd_ceq_ctrl_reg cmd_ceq = {0};
	u16 out_len = sizeof(cmd_ceq);

	cmd_ceq.func_id = sss_get_global_func_id(hwdev);
	cmd_ceq.qid = qid;
	cmd_ceq.ctrl0 = attr0;
	cmd_ceq.ctrl1 = attr1;

	ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_CEQ_CTRL_REG,
				&cmd_ceq, sizeof(cmd_ceq), &cmd_ceq, &out_len);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ceq)) {
		sdk_err(hwdev->dev_hdl,
			"Fail to set ceq %u ctrl, ret: %d status: 0x%x, out_len: 0x%x\n",
			qid, ret, cmd_ceq.head.state, out_len);
		return -EFAULT;
	}

	return 0;
}
/* Set/clear the per-host bit in the multi-host slave status register.
 *
 * Only the PPF may touch this register; other function types return
 * silently.  Read-modify-write of the single bit for @host_id.
 */
void sss_chip_set_slave_host_status(void *dev, u8 host_id, bool enable)
{
	u32 val;
	struct sss_hwdev *hwdev = dev;

	if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF)
		return;

	val = sss_chip_read_reg(hwdev->hwif, SSS_MULT_HOST_SLAVE_STATUS_ADDR);
	val = SSS_CLEAR_SLAVE_HOST_STATUS(host_id, val);
	val |= SSS_SET_SLAVE_HOST_STATUS(host_id, !!enable);

	sss_chip_write_reg(hwdev->hwif, SSS_MULT_HOST_SLAVE_STATUS_ADDR, val);

	sdk_info(hwdev->dev_hdl, "Set slave host %d status %d, reg value: 0x%x\n",
		 host_id, enable, val);
}

View File

@ -0,0 +1,127 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

/* Low-level hardware interface (CSR) accessors: register read/write,
 * PF status, doorbell control, doorbell-page id pool, and management
 * commands for EQ/WQ configuration.
 */
#ifndef SSS_HWIF_API_H
#define SSS_HWIF_API_H

#include "sss_hwdev.h"

/* values published in the HW_ATTR6 PF_STATUS field */
enum sss_pf_status {
	SSS_PF_STATUS_INIT = 0X0,
	SSS_PF_STATUS_ACTIVE_FLAG = 0x11,
	SSS_PF_STATUS_FLR_START_FLAG = 0x12,
	SSS_PF_STATUS_FLR_FINISH_FLAG = 0x13,
};

enum sss_doorbell_ctrl {
	DB_ENABLE,
	DB_DISABLE,
};

enum sss_outbound_ctrl {
	OUTBOUND_ENABLE,
	OUTBOUND_DISABLE,
};

/* an all-ones CSR read indicates a dead/absent PCIe device */
#define SSS_PCIE_LINK_DOWN		0xFFFFFFFF
#define SSS_PCIE_LINK_UP		0

/* HW_ATTR1 bit fields */
#define SSS_AF1_PPF_ID_SHIFT		0
#define SSS_AF1_AEQ_PER_FUNC_SHIFT	8
#define SSS_AF1_MGMT_INIT_STATUS_SHIFT	30
#define SSS_AF1_PF_INIT_STATUS_SHIFT	31

#define SSS_AF1_PPF_ID_MASK		0x3F
#define SSS_AF1_AEQ_PER_FUNC_MASK	0x3
#define SSS_AF1_MGMT_INIT_STATUS_MASK	0x1
#define SSS_AF1_PF_INIT_STATUS_MASK	0x1

#define SSS_GET_AF1(val, member) \
	(((val) >> SSS_AF1_##member##_SHIFT) & SSS_AF1_##member##_MASK)

/* HW_ATTR4 bit fields (doorbell control) */
#define SSS_AF4_DOORBELL_CTRL_SHIFT	0
#define SSS_AF4_DOORBELL_CTRL_MASK	0x1

#define SSS_GET_AF4(val, member) \
	(((val) >> SSS_AF4_##member##_SHIFT) & SSS_AF4_##member##_MASK)

#define SSS_SET_AF4(val, member) \
	(((val) & SSS_AF4_##member##_MASK) << SSS_AF4_##member##_SHIFT)

#define SSS_CLEAR_AF4(val, member) \
	((val) & (~(SSS_AF4_##member##_MASK << SSS_AF4_##member##_SHIFT)))

/* HW_ATTR6 bit fields (PF status, SQ limits, msix flex) */
#define SSS_AF6_PF_STATUS_SHIFT		0
#define SSS_AF6_PF_STATUS_MASK		0xFFFF

#define SSS_AF6_FUNC_MAX_SQ_SHIFT	23
#define SSS_AF6_FUNC_MAX_SQ_MASK	0x1FF

#define SSS_AF6_MSIX_FLEX_EN_SHIFT	22
#define SSS_AF6_MSIX_FLEX_EN_MASK	0x1

#define SSS_SET_AF6(val, member) \
	((((u32)(val)) & SSS_AF6_##member##_MASK) << \
	 SSS_AF6_##member##_SHIFT)

#define SSS_GET_AF6(val, member) \
	(((u32)(val) >> SSS_AF6_##member##_SHIFT) & SSS_AF6_##member##_MASK)

#define SSS_CLEAR_AF6(val, member) \
	((u32)(val) & (~(SSS_AF6_##member##_MASK << \
	 SSS_AF6_##member##_SHIFT)))

/* PPF election registers */
#define SSS_PPF_ELECT_PORT_ID_SHIFT	0
#define SSS_PPF_ELECT_PORT_ID_MASK	0x3F

#define SSS_GET_PPF_ELECT_PORT(val, member) \
	(((val) >> SSS_PPF_ELECT_PORT_##member##_SHIFT) & \
	 SSS_PPF_ELECT_PORT_##member##_MASK)

#define SSS_PPF_ELECTION_ID_SHIFT	0
#define SSS_PPF_ELECTION_ID_MASK	0x3F

#define SSS_SET_PPF(val, member) \
	(((val) & SSS_PPF_ELECTION_##member##_MASK) << \
	 SSS_PPF_ELECTION_##member##_SHIFT)

#define SSS_GET_PPF(val, member) \
	(((val) >> SSS_PPF_ELECTION_##member##_SHIFT) & \
	 SSS_PPF_ELECTION_##member##_MASK)

#define SSS_CLEAR_PPF(val, member) \
	((val) & (~(SSS_PPF_ELECTION_##member##_MASK << \
	 SSS_PPF_ELECTION_##member##_SHIFT)))

/* doorbell/direct-wqe aperture layout */
#define SSS_DB_DWQE_SIZE 0x00400000

/* db/dwqe page size: 4K */
#define SSS_DB_PAGE_SIZE	0x00001000ULL
#define SSS_DWQE_OFFSET		0x00000800ULL

#define SSS_DB_MAX_AREAS (SSS_DB_DWQE_SIZE / SSS_DB_PAGE_SIZE)

/* doorbell pointer -> page id within the aperture */
#define SSS_DB_ID(db, db_base) \
	((u32)(((ulong)(db) - (ulong)(db_base)) / SSS_DB_PAGE_SIZE))

u32 sss_chip_read_reg(struct sss_hwif *hwif, u32 reg);
void sss_chip_write_reg(struct sss_hwif *hwif, u32 reg, u32 val);
bool sss_chip_get_present_state(void *hwdev);
u32 sss_chip_get_pcie_link_status(void *hwdev);
void sss_chip_set_pf_status(struct sss_hwif *hwif, enum sss_pf_status status);
enum sss_pf_status sss_chip_get_pf_status(struct sss_hwif *hwif);
void sss_chip_enable_doorbell(struct sss_hwif *hwif);
void sss_chip_disable_doorbell(struct sss_hwif *hwif);
int sss_alloc_db_id(struct sss_hwif *hwif, u32 *id);
void sss_free_db_id(struct sss_hwif *hwif, u32 id);
void sss_dump_chip_err_info(struct sss_hwdev *hwdev);
u8 sss_chip_get_host_ppf_id(struct sss_hwdev *hwdev, u8 host_id);
int sss_chip_set_eq_msix_attr(void *hwdev, struct sss_irq_cfg *info, u16 channel);
int sss_chip_set_wq_page_size(void *hwdev, u16 func_id, u32 page_size);
int sss_chip_set_ceq_attr(struct sss_hwdev *hwdev, u16 qid,
			  u32 attr0, u32 attr1);
void sss_chip_set_slave_host_status(void *hwdev, u8 host_id, bool enable);

#endif

View File

@ -0,0 +1,441 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>

#include "sss_kernel.h"
#include "sss_hwdev.h"
#include "sss_eq_info.h"
#include "sss_hw_svc_cap.h"
#include "sss_hw_irq.h"
#include "sss_hw_ceq.h"
#include "sss_hw_export.h"
#include "sss_hwif_ceq.h"
#include "sss_hw_common.h"
#include "sss_hwif_eq.h"
#include "sss_hwif_api.h"
#include "sss_hwif_export.h"

#define SSS_DEF_CEQ_DEPTH		8192

#define SSS_CEQ_NAME			"sss_ceq"

/* CEQ ctrl0 register layout */
#define SSS_CEQ_CTRL_0_INTR_ID_SHIFT		0
#define SSS_CEQ_CTRL_0_DMA_ATTR_SHIFT		12
#define SSS_CEQ_CTRL_0_LIMIT_KICK_SHIFT		20
#define SSS_CEQ_CTRL_0_PCI_INTF_ID_SHIFT	24
#define SSS_CEQ_CTRL_0_PAGE_SIZE_SHIFT		27
#define SSS_CEQ_CTRL_0_INTR_MODE_SHIFT		31

#define SSS_CEQ_CTRL_0_INTR_ID_MASK		0x3FFU
#define SSS_CEQ_CTRL_0_DMA_ATTR_MASK		0x3FU
#define SSS_CEQ_CTRL_0_LIMIT_KICK_MASK		0xFU
#define SSS_CEQ_CTRL_0_PCI_INTF_ID_MASK		0x3U
#define SSS_CEQ_CTRL_0_PAGE_SIZE_MASK		0xF
#define SSS_CEQ_CTRL_0_INTR_MODE_MASK		0x1U

#define SSS_SET_CEQ_CTRL_0(val, member) \
	(((val) & SSS_CEQ_CTRL_0_##member##_MASK) << \
	 SSS_CEQ_CTRL_0_##member##_SHIFT)

/* CEQ ctrl1 register layout */
#define SSS_CEQ_CTRL_1_LEN_SHIFT		0
#define SSS_CEQ_CTRL_1_GLB_FUNC_ID_SHIFT	20

#define SSS_CEQ_CTRL_1_LEN_MASK			0xFFFFFU
#define SSS_CEQ_CTRL_1_GLB_FUNC_ID_MASK		0xFFFU

#define SSS_SET_CEQ_CTRL_1(val, member) \
	(((val) & SSS_CEQ_CTRL_1_##member##_MASK) << \
	 SSS_CEQ_CTRL_1_##member##_SHIFT)

#define SSS_CEQ_DMA_ATTR_DEF			0

#define SSS_MIN_CEQ_DEPTH			64
#define SSS_MAX_CEQ_DEPTH \
	((SSS_MAX_EQ_PAGE_SIZE / SSS_CEQE_SIZE) * SSS_CEQ_MAX_PAGE)

#define SSS_GET_CEQ_ELEM(ceq, id)	((u32 *)SSS_GET_EQ_ELEM((ceq), (id)))

#define SSS_GET_CUR_CEQ_ELEM(ceq)	SSS_GET_CEQ_ELEM((ceq), (ceq)->ci)

/* completion event element: [type | data] packed in one u32 */
#define SSS_CEQE_TYPE_SHIFT		23
#define SSS_CEQE_TYPE_MASK		0x7

#define SSS_CEQE_TYPE(type) \
	(((type) >> SSS_CEQE_TYPE_SHIFT) & SSS_CEQE_TYPE_MASK)
#define SSS_CEQE_DATA_MASK		0x3FFFFFF
#define SSS_CEQE_DATA(data)		((data) & SSS_CEQE_DATA_MASK)

/* recover the owning sss_ceq_info from a queue embedded in its array */
#define SSS_CEQ_TO_INFO(eq) \
	container_of((eq) - (eq)->qid, struct sss_ceq_info, ceq[0])

#define CEQ_LMT_KICK_DEF		0

enum sss_ceq_cb_state {
	SSS_CEQ_CB_REG = 0,
	SSS_CEQ_CB_RUNNING,
};

/* module parameters: queue depth and tasklet batch limit */
static u32 ceq_depth = SSS_DEF_CEQ_DEPTH;
module_param(ceq_depth, uint, 0444);
MODULE_PARM_DESC(ceq_depth,
		 "ceq depth, valid range is " __stringify(SSS_MIN_CEQ_DEPTH)
		 " - " __stringify(SSS_MAX_CEQ_DEPTH));

static u32 tasklet_depth = SSS_TASK_PROCESS_EQE_LIMIT;
module_param(tasklet_depth, uint, 0444);
MODULE_PARM_DESC(tasklet_depth,
		 "The max number of ceqe can be processed in tasklet, default = 1024");
/* Initialize every CEQ element to the queue's current wrap value.
 *
 * Hardware flips the wrap bit when it writes an element, so seeding
 * all entries with the software wrap value makes them read as "empty".
 * Values are stored big-endian to match the device layout.
 */
void sss_init_ceqe_desc(void *data)
{
	u32 i;
	u32 init_val;
	u32 *ceqe = NULL;
	struct sss_eq *ceq = (struct sss_eq *)data;

	init_val = cpu_to_be32(SSS_EQ_WRAPPED(ceq));
	for (i = 0; i < ceq->len; i++) {
		ceqe = SSS_GET_CEQ_ELEM(ceq, i);
		*(ceqe) = init_val;
	}

	/* write all ceq desc */
	wmb();
}
/* Build ctrl0/ctrl1 for one CEQ and program them via management cmd.
 *
 * ctrl0 packs interrupt id, DMA attribute, limit-kick, PCI interface
 * id, hardware page-size code and armed interrupt mode; ctrl1 carries
 * the queue length.
 * Return: result of sss_chip_set_ceq_attr() (0 on success).
 */
static u32 sss_chip_init_ceq_attr(void *data)
{
	u32 val;
	u32 len;
	struct sss_eq *ceq = (struct sss_eq *)data;
	struct sss_hwif *hwif = SSS_TO_HWDEV(ceq)->hwif;

	val = SSS_SET_CEQ_CTRL_0(SSS_EQ_IRQ_ID(ceq), INTR_ID) |
	      SSS_SET_CEQ_CTRL_0(SSS_CEQ_DMA_ATTR_DEF, DMA_ATTR) |
	      SSS_SET_CEQ_CTRL_0(CEQ_LMT_KICK_DEF, LIMIT_KICK) |
	      SSS_SET_CEQ_CTRL_0(SSS_GET_HWIF_PCI_INTF_ID(hwif), PCI_INTF_ID) |
	      SSS_SET_CEQ_CTRL_0(SSS_SET_EQ_HW_PAGE_SIZE(ceq), PAGE_SIZE) |
	      SSS_SET_CEQ_CTRL_0(SSS_INTR_MODE_ARMED, INTR_MODE);
	len = SSS_SET_CEQ_CTRL_1(ceq->len, LEN);

	return sss_chip_set_ceq_attr(SSS_TO_HWDEV(ceq), ceq->qid, val, len);
}
/* CEQ hard-irq handler: ack the msix resend bit and defer to tasklet.
 *
 * Records the interrupt timestamp for the debug dump, clears the
 * resend-timer bit so the vector can fire again, and schedules the
 * per-queue tasklet that drains the elements.
 */
irqreturn_t sss_ceq_intr_handle(int irq, void *data)
{
	struct sss_eq *ceq = (struct sss_eq *)data;

	ceq->hw_intr_jiffies = jiffies;

	sss_chip_clear_msix_resend_bit(ceq->hwdev, SSS_EQ_IRQ_ID(ceq),
				       SSS_EQ_MSIX_RESEND_TIMER_CLEAR);

	tasklet_schedule(&ceq->ceq_tasklet);

	return IRQ_HANDLED;
}
/* Dispatch one CEQ element to its registered event callback.
 *
 * Unknown event types are logged and dropped.  The RUNNING bit guards
 * the callback against concurrent unregistration (see
 * sss_ceq_unregister_cb(), which spins on it).
 */
static void sss_ceqe_handler(struct sss_eq *ceq, u32 ceqe)
{
	u32 ceqe_data = SSS_CEQE_DATA(ceqe);
	enum sss_ceq_event ceq_event = SSS_CEQE_TYPE(ceqe);
	struct sss_ceq_info *ceq_info = SSS_CEQ_TO_INFO(ceq);

	if (ceq_event >= SSS_CEQ_EVENT_MAX) {
		sdk_err(SSS_TO_HWDEV(ceq)->dev_hdl, "Unknown ceq_event:%d, ceqe_data: 0x%x\n",
			ceq_event, ceqe_data);
		return;
	}

	set_bit(SSS_CEQ_CB_RUNNING, &ceq_info->event_handler_state[ceq_event]);

	/* invoke only if still registered */
	if (ceq_info->event_handler[ceq_event] &&
	    test_bit(SSS_CEQ_CB_REG, &ceq_info->event_handler_state[ceq_event]))
		ceq_info->event_handler[ceq_event](ceq_info->event_handler_data[ceq_event],
						   ceqe_data);

	clear_bit(SSS_CEQ_CB_RUNNING, &ceq_info->event_handler_state[ceq_event]);
}
/* Drain up to tasklet_depth elements from a CEQ.
 *
 * An element whose wrap bit equals the software wrap value has not been
 * written by hardware yet — the queue is empty and we stop.  The
 * consumer index is flushed to hardware every SSS_EQ_UPDATE_CI_STEP
 * elements to bound doorbell traffic.
 * Return: true if the batch limit was hit (more work may remain),
 * false if the queue drained empty.
 */
static bool sss_ceq_irq_handle(struct sss_eq *ceq)
{
	u32 elem;
	u32 eqe_cnt = 0;
	u32 i;

	for (i = 0; i < tasklet_depth; i++) {
		elem = *(SSS_GET_CUR_CEQ_ELEM(ceq));
		elem = be32_to_cpu(elem);

		/* HW updates wrap bit, when it adds eq element event */
		if (SSS_GET_EQE_DESC(elem, WRAPPED) == ceq->wrap)
			return false;

		sss_ceqe_handler(ceq, elem);

		sss_increase_eq_ci(ceq);

		if (++eqe_cnt >= SSS_EQ_UPDATE_CI_STEP) {
			eqe_cnt = 0;
			sss_chip_set_eq_ci(ceq, SSS_EQ_NOT_ARMED);
		}
	}

	return true;
}
/* CEQ tasklet body: drain a batch, update ci, reschedule if unfinished.
 *
 * When the batch limit was reached the queue is left unarmed and the
 * tasklet is rescheduled; otherwise the ci update rearms the interrupt.
 */
static void sss_ceq_tasklet(ulong ceq_data)
{
	bool unfinish;
	struct sss_eq *ceq = (struct sss_eq *)ceq_data;

	ceq->sw_intr_jiffies = jiffies;
	unfinish = sss_ceq_irq_handle(ceq);
	sss_chip_set_eq_ci(ceq, SSS_EQ_ARM_STATE(unfinish));

	if (unfinish)
		tasklet_schedule(&ceq->ceq_tasklet);
}
/* Fill the generic sss_eq with CEQ-specific parameters and hooks
 * before sss_init_eq() is called on it.
 */
static void sss_init_ceq_para(struct sss_eq *ceq, u16 qid)
{
	ceq->qid = qid;
	ceq->type = SSS_CEQ;
	ceq->name = SSS_CEQ_NAME;
	ceq->len = ceq_depth;
	ceq->entry_size = SSS_CEQE_SIZE;

	/* CEQ-specific callbacks used by the common EQ code */
	ceq->init_desc_handler = sss_init_ceqe_desc;
	ceq->init_attr_handler = sss_chip_init_ceq_attr;
	ceq->irq_handler = sss_ceq_intr_handle;

	tasklet_init(&ceq->ceq_tasklet, sss_ceq_tasklet, (ulong)ceq);
}
/* Allocate the ceq_info container and initialize one CEQ per irq.
 *
 * Validates the tasklet_depth and ceq_depth module parameters (falling
 * back to defaults with a warning), initializes each queue, then
 * enables its msix vector.  On failure every already-initialized queue
 * is torn down and the container freed; the irqs themselves belong to
 * the caller.
 * Return: 0 on success, -ENOMEM or the sss_init_eq() error.
 */
static int sss_init_ceq(struct sss_hwdev *hwdev,
			struct sss_irq_desc *irq_array, u16 irq_num)
{
	u16 i;
	u16 qid;
	int ret;
	struct sss_ceq_info *ceq_info = NULL;

	ceq_info = kzalloc(sizeof(*ceq_info), GFP_KERNEL);
	if (!ceq_info)
		return -ENOMEM;

	ceq_info->hwdev = hwdev;
	ceq_info->num = irq_num;
	hwdev->ceq_info = ceq_info;

	/* sanitize module parameters before first use */
	if (tasklet_depth == 0) {
		sdk_warn(hwdev->dev_hdl,
			 "Invalid tasklet_depth can not be zero, adjust to %d\n",
			 SSS_TASK_PROCESS_EQE_LIMIT);
		tasklet_depth = SSS_TASK_PROCESS_EQE_LIMIT;
	}

	if (ceq_depth < SSS_MIN_CEQ_DEPTH || ceq_depth > SSS_MAX_CEQ_DEPTH) {
		sdk_warn(hwdev->dev_hdl,
			 "Invalid ceq_depth %u out of range, adjust to %d\n",
			 ceq_depth, SSS_DEF_CEQ_DEPTH);
		ceq_depth = SSS_DEF_CEQ_DEPTH;
	}

	for (qid = 0; qid < irq_num; qid++) {
		sss_init_ceq_para(&ceq_info->ceq[qid], qid);
		ret = sss_init_eq(hwdev, &ceq_info->ceq[qid], &irq_array[qid]);
		if (ret != 0) {
			sdk_err(hwdev->dev_hdl, "Fail to init ceq %u\n", qid);
			goto init_ceq_err;
		}
	}

	for (qid = 0; qid < irq_num; qid++)
		sss_chip_set_msix_state(hwdev, irq_array[qid].msix_id, SSS_MSIX_ENABLE);

	return 0;

init_ceq_err:
	/* unwind only the queues that were successfully initialized */
	for (i = 0; i < qid; i++)
		sss_deinit_eq(&ceq_info->ceq[i]);

	kfree(ceq_info);
	hwdev->ceq_info = NULL;

	return ret;
}
/* Copy the irq/msix descriptor of every CEQ into @irq and report the
 * count through @irq_num.
 */
static void sss_get_ceq_irq(struct sss_hwdev *hwdev, struct sss_irq_desc *irq,
			    u16 *irq_num)
{
	struct sss_ceq_info *ceq_info = hwdev->ceq_info;
	u16 qid;

	*irq_num = ceq_info->num;

	for (qid = 0; qid < ceq_info->num; qid++) {
		struct sss_irq_desc *src = &ceq_info->ceq[qid].irq_desc;

		irq[qid].irq_id = src->irq_id;
		irq[qid].msix_id = src->msix_id;
	}
}
/* Allocate interrupt vectors and initialize all CEQs.
 *
 * The CEQ count comes from the HW interface attributes and is clamped
 * to SSS_MAX_CEQ and to the number of irqs actually granted.
 * Return: 0 on success, -EINVAL if no irq could be allocated, or the
 * error from sss_init_ceq() (allocated irqs are released on that path).
 */
int sss_hwif_init_ceq(struct sss_hwdev *hwdev)
{
	u16 i;
	u16 ceq_num;
	u16 act_num = 0;
	int ret;
	struct sss_irq_desc irq_desc[SSS_MAX_CEQ] = {0};

	ceq_num = SSS_GET_HWIF_CEQ_NUM(hwdev->hwif);
	if (ceq_num > SSS_MAX_CEQ) {
		sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", SSS_MAX_CEQ);
		ceq_num = SSS_MAX_CEQ;
	}

	act_num = sss_alloc_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_desc, ceq_num);
	if (act_num == 0) {
		sdk_err(hwdev->dev_hdl, "Fail to alloc irq, ceq_num: %u\n", ceq_num);
		return -EINVAL;
	}

	/* accept a partial grant: shrink the queue count to what we got */
	if (act_num < ceq_num) {
		sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %u\n", act_num);
		ceq_num = act_num;
	}

	ret = sss_init_ceq(hwdev, irq_desc, ceq_num);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to init ceq, ret:%d\n", ret);
		goto init_ceq_err;
	}

	return 0;

init_ceq_err:
	for (i = 0; i < act_num; i++)
		sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_desc[i].irq_id);

	return ret;
}
/* Tear down all CEQs, unregister every event callback and free the
 * ceq_info container; hwdev->ceq_info is reset to NULL.  Interrupt
 * vectors are released by the caller (sss_hwif_deinit_ceq()).
 */
static void sss_deinit_ceq(struct sss_hwdev *hwdev)
{
	u16 i;
	struct sss_ceq_info *ceq_info = hwdev->ceq_info;
	enum sss_ceq_event event;

	for (i = 0; i < ceq_info->num; i++)
		sss_deinit_eq(&ceq_info->ceq[i]);

	for (event = SSS_NIC_CTRLQ; event < SSS_CEQ_EVENT_MAX; event++)
		sss_ceq_unregister_cb(hwdev, event);

	kfree(ceq_info);
	hwdev->ceq_info = NULL;
}
/* Tear down all CEQs and free their interrupt vectors.
 *
 * Snapshots the irq descriptors before sss_deinit_ceq() frees the
 * ceq_info they live in, then releases each irq.
 */
void sss_hwif_deinit_ceq(struct sss_hwdev *hwdev)
{
	int i;
	u16 irq_num = 0;
	struct sss_irq_desc irq[SSS_MAX_CEQ] = {0};

	sss_get_ceq_irq(hwdev, irq, &irq_num);

	sss_deinit_ceq(hwdev);

	for (i = 0; i < irq_num; i++)
		sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq[i].irq_id);
}
/* Dump diagnostic state of every CEQ to the kernel log.
 *
 * Reads hardware ci/pi via the indirect-access window, plus software
 * state (ci, wrap, tasklet state, current element, and the ages of the
 * last hard/soft interrupts), then dumps the chip error registers.
 * Debug/error-path helper only.
 */
void sss_dump_ceq_info(struct sss_hwdev *hwdev)
{
	struct sss_eq *ceq_info = NULL;
	u32 addr;
	u32 ci;
	u32 pi;
	int qid;

	for (qid = 0; qid < hwdev->ceq_info->num; qid++) {
		ceq_info = &hwdev->ceq_info->ceq[qid];
		/* Indirect access should set qid first */
		sss_chip_write_reg(SSS_TO_HWDEV(ceq_info)->hwif,
				   SSS_EQ_INDIR_ID_ADDR(ceq_info->type), ceq_info->qid);
		wmb(); /* make sure set qid firstly */

		addr = SSS_EQ_CI_REG_ADDR(ceq_info);
		ci = sss_chip_read_reg(hwdev->hwif, addr);
		addr = SSS_EQ_PI_REG_ADDR(ceq_info);
		pi = sss_chip_read_reg(hwdev->hwif, addr);
		sdk_err(hwdev->dev_hdl,
			"Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %u, ceqe: 0x%x\n",
			qid, ci, ceq_info->ci, pi, tasklet_state(&ceq_info->ceq_tasklet),
			ceq_info->wrap, be32_to_cpu(*(SSS_GET_CUR_CEQ_ELEM(ceq_info))));

		sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n",
			jiffies_to_msecs(jiffies - ceq_info->hw_intr_jiffies));
		sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n",
			jiffies_to_msecs(jiffies - ceq_info->sw_intr_jiffies));
	}

	sss_dump_chip_err_info(hwdev);
}
/* Register a handler for one CEQ event type.
 *
 * @data: opaque context passed back to @event_handler.
 * Return: 0 on success, -EINVAL on bad arguments.
 */
int sss_ceq_register_cb(void *hwdev, void *data,
			enum sss_ceq_event ceq_event, sss_ceq_event_handler_t event_handler)
{
	struct sss_ceq_info *info;

	if (!hwdev || ceq_event >= SSS_CEQ_EVENT_MAX)
		return -EINVAL;

	info = SSS_TO_CEQ_INFO(hwdev);
	info->event_handler_data[ceq_event] = data;
	info->event_handler[ceq_event] = event_handler;
	/* publish the registration last */
	set_bit(SSS_CEQ_CB_REG, &info->event_handler_state[ceq_event]);

	return 0;
}
/* Unregister a CEQ event handler.
 *
 * Clears the REG bit first so sss_ceqe_handler() stops invoking the
 * callback, then sleeps until any in-flight invocation (RUNNING bit)
 * completes before dropping the pointer.  Must be able to sleep.
 */
void sss_ceq_unregister_cb(void *hwdev, enum sss_ceq_event ceq_event)
{
	struct sss_ceq_info *ceq_info = NULL;

	if (!hwdev || ceq_event >= SSS_CEQ_EVENT_MAX)
		return;

	ceq_info = SSS_TO_CEQ_INFO(hwdev);

	clear_bit(SSS_CEQ_CB_REG, &ceq_info->event_handler_state[ceq_event]);

	while (test_bit(SSS_CEQ_CB_RUNNING,
			&ceq_info->event_handler_state[ceq_event]))
		usleep_range(SSS_EQ_USLEEP_LOW_LIMIT, SSS_EQ_USLEEP_HIG_LIMIT);

	ceq_info->event_handler[ceq_event] = NULL;
}
/* Program the default interrupt-coalescing attributes for every CEQ msix.
 *
 * NOTE(review): intr_info is passed by value here while the AEQ
 * counterpart passes a pointer to sss_chip_set_eq_msix_attr() — the two
 * helpers have different signatures; confirm this matches
 * sss_chip_set_msix_attr()'s prototype.
 * Return: 0 on success, -EFAULT on the first chip-config failure.
 */
int sss_init_ceq_msix_attr(struct sss_hwdev *hwdev)
{
	u16 i;
	int ret;
	struct sss_ceq_info *ceq_info = hwdev->ceq_info;
	struct sss_irq_cfg intr_info = {0};

	sss_init_eq_intr_info(&intr_info);

	for (i = 0; i < ceq_info->num; i++) {
		intr_info.msix_id = SSS_EQ_IRQ_ID(&ceq_info->ceq[i]);
		ret = sss_chip_set_msix_attr(hwdev, intr_info, SSS_CHANNEL_COMM);
		if (ret != 0) {
			sdk_err(hwdev->dev_hdl, "Fail to set msix attr for ceq %u\n", i);
			return -EFAULT;
		}
	}

	return 0;
}

View File

@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

/* Completion event queue (CEQ) interface: lifecycle, event-callback
 * registration, debug dump and msix attribute setup.
 */
#ifndef SSS_HWIF_CEQ_H
#define SSS_HWIF_CEQ_H

#include "sss_hw_ceq.h"
#include "sss_ceq_info.h"
#include "sss_hwdev.h"

/* per-event callback registration (see enum sss_ceq_event) */
int sss_ceq_register_cb(void *hwdev, void *data,
			enum sss_ceq_event ceq_event, sss_ceq_event_handler_t event_handler);
void sss_ceq_unregister_cb(void *hwdev, enum sss_ceq_event ceq_event);

/* full init/deinit including irq allocation */
int sss_hwif_init_ceq(struct sss_hwdev *hwdev);
void sss_hwif_deinit_ceq(struct sss_hwdev *hwdev);

void sss_dump_ceq_info(struct sss_hwdev *hwdev);
int sss_init_ceq_msix_attr(struct sss_hwdev *hwdev);

#endif

View File

@ -0,0 +1,928 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_hwif_eq.h"
#include "sss_hwif_api.h"
#include "sss_hwif_ctrlq.h"
#include "sss_hwif_aeq.h"
#include "sss_hwif_ceq.h"
#include "sss_common.h"

#define SSS_CTRLQ_CMD_TIMEOUT		5000 /* millisecond */

#define SSS_CTRLQ_WQE_HEAD_LEN		32

#define SSS_HI_8_BITS(data)		(((data) >> 8) & 0xFF)
#define SSS_LO_8_BITS(data)		((data) & 0xFF)

/* doorbell info word layout */
#define SSS_CTRLQ_DB_INFO_HI_PI_SHIFT			0
#define SSS_CTRLQ_DB_INFO_HI_PI_MASK			0xFFU
#define SSS_CTRLQ_DB_INFO_SET(val, member)	\
	((((u32)(val)) & SSS_CTRLQ_DB_INFO_##member##_MASK) << \
	 SSS_CTRLQ_DB_INFO_##member##_SHIFT)

/* doorbell head word layout */
#define SSS_CTRLQ_DB_HEAD_QUEUE_TYPE_SHIFT		23
#define SSS_CTRLQ_DB_HEAD_CTRLQ_TYPE_SHIFT		24
#define SSS_CTRLQ_DB_HEAD_SRC_TYPE_SHIFT		27
#define SSS_CTRLQ_DB_HEAD_QUEUE_TYPE_MASK		0x1U
#define SSS_CTRLQ_DB_HEAD_CTRLQ_TYPE_MASK		0x7U
#define SSS_CTRLQ_DB_HEAD_SRC_TYPE_MASK			0x1FU
#define SSS_CTRLQ_DB_HEAD_SET(val, member)	\
	((((u32)(val)) & SSS_CTRLQ_DB_HEAD_##member##_MASK) << \
	 SSS_CTRLQ_DB_HEAD_##member##_SHIFT)

/* wqe ctrl word layout (pi, cmd, module, ack type, hw busy bit) */
#define SSS_CTRLQ_CTRL_PI_SHIFT				0
#define SSS_CTRLQ_CTRL_CMD_SHIFT			16
#define SSS_CTRLQ_CTRL_MOD_SHIFT			24
#define SSS_CTRLQ_CTRL_ACK_TYPE_SHIFT			29
#define SSS_CTRLQ_CTRL_HW_BUSY_BIT_SHIFT		31

#define SSS_CTRLQ_CTRL_PI_MASK				0xFFFFU
#define SSS_CTRLQ_CTRL_CMD_MASK				0xFFU
#define SSS_CTRLQ_CTRL_MOD_MASK				0x1FU
#define SSS_CTRLQ_CTRL_ACK_TYPE_MASK			0x3U
#define SSS_CTRLQ_CTRL_HW_BUSY_BIT_MASK			0x1U

#define SSS_CTRLQ_CTRL_SET(val, member)	\
	((((u32)(val)) & SSS_CTRLQ_CTRL_##member##_MASK) << \
	 SSS_CTRLQ_CTRL_##member##_SHIFT)

#define SSS_CTRLQ_CTRL_GET(val, member)	\
	(((val) >> SSS_CTRLQ_CTRL_##member##_SHIFT) & \
	 SSS_CTRLQ_CTRL_##member##_MASK)

/* wqe head word layout (buffer len, formats, completion request, ...) */
#define SSS_CTRLQ_WQE_HEAD_BD_LEN_SHIFT			0
#define SSS_CTRLQ_WQE_HEAD_COMPLETE_FMT_SHIFT		15
#define SSS_CTRLQ_WQE_HEAD_DATA_FMT_SHIFT		22
#define SSS_CTRLQ_WQE_HEAD_COMPLETE_REQ_SHIFT		23
#define SSS_CTRLQ_WQE_HEAD_COMPLETE_SECT_LEN_SHIFT	27
#define SSS_CTRLQ_WQE_HEAD_CTRL_LEN_SHIFT		29
#define SSS_CTRLQ_WQE_HEAD_HW_BUSY_BIT_SHIFT		31

#define SSS_CTRLQ_WQE_HEAD_BD_LEN_MASK			0xFFU
#define SSS_CTRLQ_WQE_HEAD_COMPLETE_FMT_MASK		0x1U
#define SSS_CTRLQ_WQE_HEAD_DATA_FMT_MASK		0x1U
#define SSS_CTRLQ_WQE_HEAD_COMPLETE_REQ_MASK		0x1U
#define SSS_CTRLQ_WQE_HEAD_COMPLETE_SECT_LEN_MASK	0x3U
#define SSS_CTRLQ_WQE_HEAD_CTRL_LEN_MASK		0x3U
#define SSS_CTRLQ_WQE_HEAD_HW_BUSY_BIT_MASK		0x1U

#define SSS_CTRLQ_WQE_HEAD_SET(val, member)	\
	((((u32)(val)) & SSS_CTRLQ_WQE_HEAD_##member##_MASK) << \
	 SSS_CTRLQ_WQE_HEAD_##member##_SHIFT)

#define SSS_GET_CTRLQ_WQE_HEAD(val, member)	\
	(((val) >> SSS_CTRLQ_WQE_HEAD_##member##_SHIFT) & \
	 SSS_CTRLQ_WQE_HEAD_##member##_MASK)

/* store_data word: top bit is the ARM flag */
#define SSS_STORE_DATA_ARM_SHIFT			31

#define SSS_STORE_DATA_ARM_MASK				0x1U

#define SSS_STORE_DATA_SET(val, member)	\
	(((val) & SSS_STORE_DATA_##member##_MASK) << \
	 SSS_STORE_DATA_##member##_SHIFT)

#define SSS_STORE_DATA_CLEAR(val, member)	\
	((val) & (~(SSS_STORE_DATA_##member##_MASK << \
	 SSS_STORE_DATA_##member##_SHIFT)))

/* completion status word: low 31 bits carry the error code */
#define SSS_WQE_ERRCODE_VAL_SHIFT			0

#define SSS_WQE_ERRCODE_VAL_MASK			0x7FFFFFFF

#define SSS_GET_WQE_ERRCODE(val, member)	\
	(((val) >> SSS_WQE_ERRCODE_##member##_SHIFT) & \
	 SSS_WQE_ERRCODE_##member##_MASK)

/* ceq element: which ctrlq produced the completion */
#define SSS_CEQE_CTRLQ_TYPE_SHIFT			0

#define SSS_CEQE_CTRLQ_TYPE_MASK			0x7

#define SSS_GET_CEQE_CTRLQ(val, member)	\
	(((val) >> SSS_CEQE_CTRLQ_##member##_SHIFT) & \
	 SSS_CEQE_CTRLQ_##member##_MASK)

#define SSS_WQE_COMPLETE(ctrl_info)	SSS_CTRLQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)

#define SSS_WQE_HEAD(wqe)		((struct sss_ctrlq_head *)(wqe))

/* producer index -> doorbell offset (8 bytes per low-pi step) */
#define SSS_CTRLQ_DB_PI_OFF(pi)		(((u16)SSS_LO_8_BITS(pi)) << 3)

#define SSS_CTRLQ_DB_ADDR(db_base, pi)	\
	(((u8 *)(db_base)) + SSS_CTRLQ_DB_PI_OFF(pi))

#define SSS_FIRST_DATA_TO_WRITE_LAST	sizeof(u64)

#define SSS_WQE_LCMD_SIZE		64
#define SSS_WQE_SCMD_SIZE		64

#define SSS_COMPLETE_LEN		3

#define SSS_CTRLQ_WQE_SIZE		64

/* recover the owning sss_ctrlq_info from a queue embedded in its array */
#define SSS_CTRLQ_TO_INFO(ctrlq)	\
	container_of((ctrlq) - (ctrlq)->ctrlq_type, struct sss_ctrlq_info, ctrlq[0])

#define SSS_CTRLQ_COMPLETE_CODE		11
enum SSS_ctrlq_scmd_type {
SSS_CTRLQ_SET_ARM_CMD = 2,
};
enum sss_ctrl_sect_len {
SSS_CTRL_SECT_LEN = 1,
SSS_CTRL_DIRECT_SECT_LEN = 2,
};
enum sss_bd_len {
SSS_BD_LCMD_LEN = 2,
SSS_BD_SCMD_LEN = 3,
};
enum sss_data_fmt {
SSS_DATA_SGE,
SSS_DATA_DIRECT,
};
enum sss_completion_fmt {
SSS_COMPLETE_DIRECT,
SSS_COMPLETE_SGE,
};
enum sss_completion_request {
SSS_CEQ_SET = 1,
};
enum sss_ctrlq_comm_msg_type {
SSS_SYNC_MSG_DIRECT_REPLY,
SSS_SYNC_MSG_SGE_REPLY,
SSS_ASYNC_MSG,
};
#define SSS_SCMD_DATA_LEN 16
enum sss_db_src_type {
SSS_DB_SRC_CTRLQ_TYPE,
SSS_DB_SRC_L2NIC_SQ_TYPE,
};
enum sss_ctrlq_db_type {
SSS_DB_SQ_RQ_TYPE,
SSS_DB_CTRLQ_TYPE,
};
struct sss_ctrlq_db {
u32 head;
u32 info;
};
/* hardware define: ctrlq wqe */
struct sss_ctrlq_head {
u32 info;
u32 store_data;
};
struct sss_scmd_bd {
u32 data_len;
u32 rsvd;
u8 data[SSS_SCMD_DATA_LEN];
};
struct sss_lcmd_bd {
struct sss_sge sge;
u32 rsvd1;
u64 store_async_buf;
u64 rsvd3;
};
struct sss_wqe_state {
u32 info;
};
struct sss_wqe_ctrl {
u32 info;
};
struct sss_sge_reply {
struct sss_sge sge;
u32 rsvd;
};
struct sss_ctrlq_completion {
union {
struct sss_sge_reply sge_reply;
u64 direct_reply;
};
};
struct sss_ctrlq_wqe_scmd {
struct sss_ctrlq_head head;
u64 rsvd;
struct sss_wqe_state state;
struct sss_wqe_ctrl ctrl;
struct sss_ctrlq_completion completion;
struct sss_scmd_bd bd;
};
struct sss_ctrlq_wqe_lcmd {
struct sss_ctrlq_head head;
struct sss_wqe_state state;
struct sss_wqe_ctrl ctrl;
struct sss_ctrlq_completion completion;
struct sss_lcmd_bd bd;
};
struct sss_ctrlq_inline_wqe {
struct sss_ctrlq_wqe_scmd wqe_scmd;
};
struct sss_ctrlq_wqe {
union {
struct sss_ctrlq_inline_wqe inline_wqe;
struct sss_ctrlq_wqe_lcmd wqe_lcmd;
};
};
typedef int (*sss_ctrlq_type_handler_t)(struct sss_ctrlq *ctrlq,
struct sss_ctrlq_wqe *wqe, u16 ci);
/* Return the WQE at the current consumer index, or NULL if empty. */
void *sss_ctrlq_read_wqe(struct sss_wq *wq, u16 *ci)
{
	return sss_wq_is_empty(wq) ? NULL : sss_wq_read_one_wqebb(wq, ci);
}
/* Claim one free WQEBB for a new command, or NULL when the queue is full. */
static void *sss_ctrlq_get_wqe(struct sss_wq *wq, u16 *pi)
{
	return sss_wq_free_wqebb(wq) ? sss_wq_get_one_wqebb(wq, pi) : NULL;
}
/* Point the WQE completion SGE at the caller's reply buffer. */
static void sss_ctrlq_set_completion(struct sss_ctrlq_completion *complete,
				     struct sss_ctrl_msg_buf *out_buf)
{
	sss_set_sge(&complete->sge_reply.sge, out_buf->dma_addr,
		    SSS_CTRLQ_BUF_LEN);
}
/* Describe the request buffer in the long-command WQE's SGE. */
static void sss_ctrlq_set_lcmd_bufdesc(struct sss_ctrlq_wqe_lcmd *wqe,
				       struct sss_ctrl_msg_buf *in_buf)
{
	sss_set_sge(&wqe->bd.sge, in_buf->dma_addr, in_buf->size);
}
/* Compose the doorbell record for the given ctrlq and producer index. */
static void sss_ctrlq_fill_db(struct sss_ctrlq_db *db,
			      enum sss_ctrlq_type ctrlq_type, u16 pi)
{
	u32 head;

	head = SSS_CTRLQ_DB_HEAD_SET(SSS_DB_SRC_CTRLQ_TYPE, SRC_TYPE);
	head |= SSS_CTRLQ_DB_HEAD_SET(SSS_DB_CTRLQ_TYPE, QUEUE_TYPE);
	head |= SSS_CTRLQ_DB_HEAD_SET(ctrlq_type, CTRLQ_TYPE);
	db->head = head;

	/* Only the high byte of the PI goes into info; the low byte is
	 * encoded in the doorbell address (SSS_CTRLQ_DB_PI_OFF).
	 */
	db->info = SSS_CTRLQ_DB_INFO_SET(SSS_HI_8_BITS(pi), HI_PI);
}
/* Ring the hardware doorbell for @pi.  The record is converted to
 * big-endian (device byte order) and written as a single 64-bit store;
 * wmb() orders the preceding WQE writes before the doorbell reaches HW.
 */
static void sss_ctrlq_set_db(struct sss_ctrlq *ctrlq,
			     enum sss_ctrlq_type ctrlq_type, u16 pi)
{
	struct sss_ctrlq_db db = {0};
	u8 *db_base = SSS_TO_HWDEV(ctrlq)->ctrlq_info->db_base;

	sss_ctrlq_fill_db(&db, ctrlq_type, pi);

	/* The data that is written to HW should be in Big Endian Format */
	db.info = sss_hw_be32(db.info);
	db.head = sss_hw_be32(db.head);

	wmb(); /* make sure write db info to reg */
	writeq(*((u64 *)&db), SSS_CTRLQ_DB_ADDR(db_base, pi));
}
/* Copy a prepared WQE into the queue.  The body is written first; the
 * head (first 8 bytes, containing the HW busy/ownership bit) is written
 * last, after a barrier, so hardware never sees a half-written WQE.
 */
static void sss_ctrlq_fill_wqe(void *dst, const void *src)
{
	memcpy((u8 *)dst + SSS_FIRST_DATA_TO_WRITE_LAST,
	       (u8 *)src + SSS_FIRST_DATA_TO_WRITE_LAST,
	       SSS_CTRLQ_WQE_SIZE - SSS_FIRST_DATA_TO_WRITE_LAST);

	wmb(); /* The first 8 bytes should be written last */

	*(u64 *)dst = *(u64 *)src;
}
/* Fill the control and header sections of a ctrlq WQE.
 * @wrapped becomes the HW busy/ownership bit; @data_fmt selects the
 * long-command (SGE) or short-command (inline) WQE layout.
 */
static void sss_ctrlq_prepare_wqe_ctrl(struct sss_ctrlq_wqe *wqe,
				       int wrapped, u8 mod, u8 cmd, u16 pi,
				       enum sss_completion_fmt complete_fmt,
				       enum sss_data_fmt data_fmt,
				       enum sss_bd_len buf_len)
{
	struct sss_wqe_ctrl *ctrl = NULL;
	enum sss_ctrl_sect_len ctrl_len;
	struct sss_ctrlq_wqe_lcmd *wqe_lcmd = NULL;
	struct sss_ctrlq_wqe_scmd *wqe_scmd = NULL;
	u32 saved_data = SSS_WQE_HEAD(wqe)->store_data;

	if (data_fmt == SSS_DATA_SGE) {
		/* Long command: buffer described by an SGE. */
		wqe_lcmd = &wqe->wqe_lcmd;

		wqe_lcmd->state.info = 0;
		ctrl = &wqe_lcmd->ctrl;
		ctrl_len = SSS_CTRL_SECT_LEN;
	} else {
		/* Short command: data carried inline in the WQE. */
		wqe_scmd = &wqe->inline_wqe.wqe_scmd;

		wqe_scmd->state.info = 0;
		ctrl = &wqe_scmd->ctrl;
		ctrl_len = SSS_CTRL_DIRECT_SECT_LEN;
	}

	ctrl->info = SSS_CTRLQ_CTRL_SET(pi, PI) |
		     SSS_CTRLQ_CTRL_SET(cmd, CMD) |
		     SSS_CTRLQ_CTRL_SET(mod, MOD) |
		     SSS_CTRLQ_CTRL_SET(SSS_ACK_TYPE_CTRLQ, ACK_TYPE);

	SSS_WQE_HEAD(wqe)->info =
		SSS_CTRLQ_WQE_HEAD_SET(buf_len, BD_LEN) |
		SSS_CTRLQ_WQE_HEAD_SET(complete_fmt, COMPLETE_FMT) |
		SSS_CTRLQ_WQE_HEAD_SET(data_fmt, DATA_FMT) |
		SSS_CTRLQ_WQE_HEAD_SET(SSS_CEQ_SET, COMPLETE_REQ) |
		SSS_CTRLQ_WQE_HEAD_SET(SSS_COMPLETE_LEN, COMPLETE_SECT_LEN) |
		SSS_CTRLQ_WQE_HEAD_SET(ctrl_len, CTRL_LEN) |
		SSS_CTRLQ_WQE_HEAD_SET((u32)wrapped, HW_BUSY_BIT);

	/* Clear the ARM flag in store_data, then set it only for the
	 * SET_ARM command of the COMM module.  (The original applied the
	 * cleared value with "&=", which is equivalent but redundant:
	 * x &= (x & ~m) == x & ~m.)
	 */
	saved_data = SSS_STORE_DATA_CLEAR(saved_data, ARM);
	if (cmd == SSS_CTRLQ_SET_ARM_CMD && mod == SSS_MOD_TYPE_COMM)
		saved_data |= SSS_STORE_DATA_SET(1, ARM);
	SSS_WQE_HEAD(wqe)->store_data = saved_data;
}
/* Build a complete long-command WQE for @cmd_type.  For SGE replies the
 * completion section points at @out_buf; for async messages the request
 * buffer pointer is stashed in the BD so it can be freed on completion.
 */
static void sss_ctrlq_set_lcmd_wqe(struct sss_ctrlq_wqe *wqe,
				   enum sss_ctrlq_comm_msg_type cmd_type,
				   struct sss_ctrl_msg_buf *in_buf,
				   struct sss_ctrl_msg_buf *out_buf, int wrapped,
				   u8 mod, u8 cmd, u16 pi)
{
	struct sss_ctrlq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
	enum sss_completion_fmt complete_fmt = SSS_COMPLETE_DIRECT;

	switch (cmd_type) {
	case SSS_SYNC_MSG_DIRECT_REPLY:
		wqe_lcmd->completion.direct_reply = 0;
		break;
	case SSS_SYNC_MSG_SGE_REPLY:
		if (out_buf) {
			complete_fmt = SSS_COMPLETE_SGE;
			sss_ctrlq_set_completion(&wqe_lcmd->completion, out_buf);
		}
		break;
	case SSS_ASYNC_MSG:
		wqe_lcmd->completion.direct_reply = 0;
		/* Keep the in_buf pointer so the CEQ handler can free it. */
		wqe_lcmd->bd.store_async_buf = (u64)(in_buf);
		break;
	}

	sss_ctrlq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, pi, complete_fmt,
				   SSS_DATA_SGE, SSS_BD_LCMD_LEN);

	sss_ctrlq_set_lcmd_bufdesc(wqe, in_buf);
}
/* Propagate the direct reply and firmware error code from the completed
 * WQE at @pi into the waiter's output locations (if still registered).
 */
static void sss_ctrlq_update_cmd_state(struct sss_ctrlq *ctrlq, u16 pi,
				       struct sss_ctrlq_wqe *wqe)
{
	struct sss_ctrlq_cmd_info *info = &ctrlq->cmd_info[pi];
	struct sss_ctrlq_wqe_lcmd *lcmd = &wqe->wqe_lcmd;
	u32 state = sss_hw_cpu32(lcmd->state.info);

	if (info->direct_resp)
		*info->direct_resp =
			sss_hw_cpu32(lcmd->completion.direct_reply);

	if (info->err_code)
		*info->err_code = SSS_GET_WQE_ERRCODE(state, VAL);
}
/* After a wait timed out, re-inspect the WQE: if hardware did complete
 * it in the meantime, harvest the result and return 0 (pseudo timeout);
 * otherwise return -EFAULT to signal a genuine timeout.
 */
static int sss_ctrlq_check_sync_timeout(struct sss_ctrlq *ctrlq,
					struct sss_ctrlq_wqe *wqe, u16 pi)
{
	struct sss_ctrlq_wqe_lcmd *wqe_lcmd;
	struct sss_wqe_ctrl *ctrl;
	u32 ctrl_info;

	wqe_lcmd = &wqe->wqe_lcmd;
	ctrl = &wqe_lcmd->ctrl;
	ctrl_info = sss_hw_cpu32((ctrl)->info);
	if (!SSS_WQE_COMPLETE(ctrl_info)) {
		sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq wqe do not complete\n");
		return -EFAULT;
	}

	sss_ctrlq_update_cmd_state(ctrlq, pi, wqe);

	sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Success to check ctrlq sync cmd\n");
	return 0;
}
/* Drop the cmd_info pointers that still reference the waiter's stack
 * copies; anything re-targeted by a newer command is left alone.
 */
static void sss_reset_cmd_info(struct sss_ctrlq_cmd_info *cmd_info,
			       const struct sss_ctrlq_cmd_info *store_cmd_info)
{
	if (cmd_info->direct_resp == store_cmd_info->direct_resp)
		cmd_info->direct_resp = NULL;

	if (cmd_info->done == store_cmd_info->done)
		cmd_info->done = NULL;

	if (cmd_info->err_code == store_cmd_info->err_code)
		cmd_info->err_code = NULL;
}
/* Wait for a synchronous ctrlq command to complete.
 * Poll mode drives the CEQ handler by hand; interrupt mode sleeps on the
 * completion.  On timeout the WQE is re-checked under ctrlq_lock, since
 * the completion may have raced with the wait expiring.
 * Returns 0 on success, -ETIMEDOUT on a genuine timeout.
 */
static int sss_ctrlq_ceq_handler_state(struct sss_ctrlq *ctrlq,
				       struct sss_ctrlq_cmd_info *cmd_info,
				       struct sss_ctrlq_cmd_info *store_cmd_info,
				       u64 curr_msg_id, u16 curr_pi,
				       struct sss_ctrlq_wqe *curr_wqe,
				       u32 timeout)
{
	ulong timeo;
	int ret;
	ulong end = jiffies + msecs_to_jiffies(timeout);

	if (SSS_TO_HWDEV(ctrlq)->poll) {
		/* Polling mode: reap completions ourselves. */
		while (time_before(jiffies, end)) {
			sss_ctrlq_ceq_handler(SSS_TO_HWDEV(ctrlq), 0);
			if (store_cmd_info->done->done != 0)
				return 0;
			usleep_range(9, 10); /* sleep 9 us ~ 10 us */
		}
	} else {
		timeo = msecs_to_jiffies(timeout);
		if (wait_for_completion_timeout(store_cmd_info->done, timeo))
			return 0;
	}

	spin_lock_bh(&ctrlq->ctrlq_lock);

	/* Detach our cmpt_code so the CEQ handler stops writing to it. */
	if (cmd_info->cmpt_code == store_cmd_info->cmpt_code)
		cmd_info->cmpt_code = NULL;

	/* Completion raced with the timeout - treat as success. */
	if (*store_cmd_info->cmpt_code == SSS_CTRLQ_COMPLETE_CODE) {
		sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq direct sync command complete\n");
		spin_unlock_bh(&ctrlq->ctrlq_lock);
		return 0;
	}

	if (curr_msg_id == cmd_info->msg_id) {
		/* Slot still belongs to our command: check the WQE itself. */
		ret = sss_ctrlq_check_sync_timeout(ctrlq, curr_wqe, curr_pi);
		if (ret != 0)
			cmd_info->msg_type = SSS_MSG_TYPE_TIMEOUT;
		else
			cmd_info->msg_type = SSS_MSG_TYPE_PSEUDO_TIMEOUT;
	} else {
		ret = -ETIMEDOUT;
		sdk_err(SSS_TO_HWDEV(ctrlq)->dev_hdl,
			"Ctrlq sync command curr_msg_id dismatch with cmd_info msg_id\n");
	}

	sss_reset_cmd_info(cmd_info, store_cmd_info);

	spin_unlock_bh(&ctrlq->ctrlq_lock);

	if (ret == 0)
		return 0;

	sss_dump_ceq_info(SSS_TO_HWDEV(ctrlq));

	return -ETIMEDOUT;
}
/* Thin wrapper around sss_ctrlq_ceq_handler_state(); kept as a separate
 * entry point by both sync-command paths.
 */
static int sss_wait_ctrlq_sync_cmd_completion(struct sss_ctrlq *ctrlq,
					      struct sss_ctrlq_cmd_info *cmd_info,
					      struct sss_ctrlq_cmd_info *store_cmd_info,
					      u64 curr_msg_id, u16 curr_pi,
					      struct sss_ctrlq_wqe *curr_wqe, u32 timeout)
{
	return sss_ctrlq_ceq_handler_state(ctrlq, cmd_info, store_cmd_info,
					   curr_msg_id, curr_pi, curr_wqe, timeout);
}
/* Acquire ctrlq_lock for submitting a command.  Fails with -EAGAIN (and
 * the lock released) when the channel has been administratively stopped.
 * On success the lock is HELD; pair with sss_ctrlq_msg_unlock().
 */
static int sss_ctrlq_msg_lock(struct sss_ctrlq *ctrlq, u16 channel)
{
	struct sss_ctrlq_info *ctrlq_info = SSS_CTRLQ_TO_INFO(ctrlq);

	spin_lock_bh(&ctrlq->ctrlq_lock);

	if (ctrlq_info->lock_channel_en && test_bit(channel, &ctrlq_info->channel_stop)) {
		spin_unlock_bh(&ctrlq->ctrlq_lock);
		return -EAGAIN;
	}

	return 0;
}

/* Release the lock taken by sss_ctrlq_msg_lock(). */
static void sss_ctrlq_msg_unlock(struct sss_ctrlq *ctrlq)
{
	spin_unlock_bh(&ctrlq->ctrlq_lock);
}
/* Attach the request/reply buffers to the cmd_info slot, taking a
 * reference on each so they survive until the completion handler frees
 * them.  NOTE(review): @hwdev is currently unused here.
 */
static void sss_ctrlq_set_cmd_buf(struct sss_ctrlq_cmd_info *cmd_info,
				  struct sss_hwdev *hwdev,
				  struct sss_ctrl_msg_buf *in_buf,
				  struct sss_ctrl_msg_buf *out_buf)
{
	cmd_info->in_buf = in_buf;
	cmd_info->out_buf = out_buf;

	if (in_buf)
		atomic_inc(&in_buf->ref_cnt);

	if (out_buf)
		atomic_inc(&out_buf->ref_cnt);
}
/* Submit a synchronous ctrlq command and wait for its 64-bit direct
 * reply, stored through @out_param.  @timeout is in ms (0 selects the
 * default).  Returns 0 on success, a negative errno on transport
 * failure, or the positive firmware error code.
 */
int sss_ctrlq_sync_cmd_direct_reply(struct sss_ctrlq *ctrlq, u8 mod,
				    u8 cmd, struct sss_ctrl_msg_buf *in_buf,
				    u64 *out_param, u32 timeout, u16 channel)
{
	struct sss_wq *wq = &ctrlq->wq;
	struct sss_ctrlq_wqe *curr_wqe = NULL;
	struct sss_ctrlq_wqe wqe;
	struct sss_ctrlq_cmd_info *cmd_info = NULL;
	struct sss_ctrlq_cmd_info store_cmd_info;
	struct completion done;
	u16 curr_pi, next_pi;
	int wrapped;
	int errcode = 0;
	int cmpt_code = SSS_CTRLQ_SEND_CMPT_CODE;
	u64 curr_msg_id;
	int ret;
	u32 real_timeout;

	/* Holds ctrlq_lock on success. */
	ret = sss_ctrlq_msg_lock(ctrlq, channel);
	if (ret != 0)
		return ret;

	curr_wqe = sss_ctrlq_get_wqe(wq, &curr_pi);
	if (!curr_wqe) {
		sss_ctrlq_msg_unlock(ctrlq);
		return -EBUSY;
	}

	memset(&wqe, 0, sizeof(wqe));

	wrapped = ctrlq->wrapped;

	/* Advance PI, toggling the wrap (ownership) bit at queue end. */
	next_pi = curr_pi + SSS_WQEBB_NUM_FOR_CTRLQ;
	if (next_pi >= wq->q_depth) {
		ctrlq->wrapped = (ctrlq->wrapped == 0) ? 1 : 0;
		next_pi -= (u16)wq->q_depth;
	}

	cmd_info = &ctrlq->cmd_info[curr_pi];

	init_completion(&done);

	cmd_info->msg_type = SSS_MSG_TYPE_DIRECT_RESP;
	cmd_info->done = &done;
	cmd_info->err_code = &errcode;
	cmd_info->direct_resp = out_param;
	cmd_info->cmpt_code = &cmpt_code;
	cmd_info->channel = channel;
	sss_ctrlq_set_cmd_buf(cmd_info, SSS_TO_HWDEV(ctrlq), in_buf, NULL);

	/* Private snapshot: the slot may be reused by later commands. */
	memcpy(&store_cmd_info, cmd_info, sizeof(*cmd_info));

	sss_ctrlq_set_lcmd_wqe(&wqe, SSS_SYNC_MSG_DIRECT_REPLY, in_buf, NULL,
			       wrapped, mod, cmd, curr_pi);

	/* CTRLQ WQE is not shadow, therefore wqe will be written to wq */
	sss_ctrlq_fill_wqe(curr_wqe, &wqe);

	(cmd_info->msg_id)++;
	curr_msg_id = cmd_info->msg_id;

	sss_ctrlq_set_db(ctrlq, SSS_CTRLQ_SYNC, next_pi);

	sss_ctrlq_msg_unlock(ctrlq);

	real_timeout = timeout ? timeout : SSS_CTRLQ_CMD_TIMEOUT;
	ret = sss_wait_ctrlq_sync_cmd_completion(ctrlq, cmd_info, &store_cmd_info,
						 curr_msg_id, curr_pi, curr_wqe, real_timeout);
	if (ret != 0) {
		sdk_err(SSS_TO_HWDEV(ctrlq)->dev_hdl,
			"Ctrlq sync cmd direct resp timeout, mod: %u, cmd: %u, pi: 0x%x\n",
			mod, cmd, curr_pi);
		ret = -ETIMEDOUT;
	}

	if (cmpt_code == SSS_CTRLQ_FORCE_STOP_CMPT_CODE) {
		sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Force stop ctrlq cmd, mod: %u, cmd: %u\n",
			 mod, cmd);
		ret = -EAGAIN;
	}

	destroy_completion(&done);
	smp_rmb(); /* read error code after completion */

	return (ret != 0) ? ret : errcode;
}
/* Submit a synchronous ctrlq command whose detailed reply lands in
 * @out_buf (SGE reply); @out_param additionally receives the direct
 * reply word.  Same return convention as the direct-reply variant.
 */
int sss_ctrlq_sync_cmd_detail_reply(struct sss_ctrlq *ctrlq, u8 mod, u8 cmd,
				    struct sss_ctrl_msg_buf *in_buf,
				    struct sss_ctrl_msg_buf *out_buf,
				    u64 *out_param, u32 timeout, u16 channel)
{
	struct sss_wq *wq = &ctrlq->wq;
	struct sss_ctrlq_wqe *curr_wqe = NULL, wqe;
	struct sss_ctrlq_cmd_info *cmd_info = NULL, store_cmd_info;
	struct completion done;
	u16 curr_pi, next_pi;
	int wrapped, errcode = 0;
	int cmpt_code = SSS_CTRLQ_SEND_CMPT_CODE;
	u64 curr_msg_id;
	int ret;
	u32 real_timeout;

	/* Holds ctrlq_lock on success. */
	ret = sss_ctrlq_msg_lock(ctrlq, channel);
	if (ret != 0)
		return ret;

	curr_wqe = sss_ctrlq_get_wqe(wq, &curr_pi);
	if (!curr_wqe) {
		sss_ctrlq_msg_unlock(ctrlq);
		return -EBUSY;
	}

	memset(&wqe, 0, sizeof(wqe));

	wrapped = ctrlq->wrapped;

	/* Advance PI, toggling the wrap (ownership) bit at queue end. */
	next_pi = curr_pi + SSS_WQEBB_NUM_FOR_CTRLQ;
	if (next_pi >= wq->q_depth) {
		ctrlq->wrapped = (ctrlq->wrapped == 0) ? 1 : 0;
		next_pi -= (u16)wq->q_depth;
	}

	cmd_info = &ctrlq->cmd_info[curr_pi];

	init_completion(&done);

	cmd_info->msg_type = SSS_MSG_TYPE_SGE_RESP;
	cmd_info->done = &done;
	cmd_info->err_code = &errcode;
	cmd_info->direct_resp = out_param;
	cmd_info->cmpt_code = &cmpt_code;
	cmd_info->channel = channel;
	sss_ctrlq_set_cmd_buf(cmd_info, SSS_TO_HWDEV(ctrlq), in_buf, out_buf);

	/* Private snapshot: the slot may be reused by later commands. */
	memcpy(&store_cmd_info, cmd_info, sizeof(*cmd_info));

	sss_ctrlq_set_lcmd_wqe(&wqe, SSS_SYNC_MSG_SGE_REPLY, in_buf, out_buf,
			       wrapped, mod, cmd, curr_pi);

	sss_ctrlq_fill_wqe(curr_wqe, &wqe);

	(cmd_info->msg_id)++;
	curr_msg_id = cmd_info->msg_id;

	sss_ctrlq_set_db(ctrlq, ctrlq->ctrlq_type, next_pi);

	sss_ctrlq_msg_unlock(ctrlq);

	real_timeout = timeout ? timeout : SSS_CTRLQ_CMD_TIMEOUT;
	ret = sss_wait_ctrlq_sync_cmd_completion(ctrlq, cmd_info, &store_cmd_info,
						 curr_msg_id, curr_pi, curr_wqe, real_timeout);
	if (ret != 0) {
		sdk_err(SSS_TO_HWDEV(ctrlq)->dev_hdl,
			"Ctrlq sync cmd detail resp timeout, mod: %u, cmd: %u, pi: 0x%x\n",
			mod, cmd, curr_pi);
		ret = -ETIMEDOUT;
	}

	if (cmpt_code == SSS_CTRLQ_FORCE_STOP_CMPT_CODE) {
		sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Force stop ctrlq cmd, mod: %u, cmd: %u\n",
			 mod, cmd);
		ret = -EAGAIN;
	}

	destroy_completion(&done);
	smp_rmb(); /* read error code after completion */

	return (ret != 0) ? ret : errcode;
}
/* Release the in/out message buffers attached to @info and clear the
 * slot's pointers.
 */
void sss_free_ctrlq_cmd_buf(struct sss_hwdev *hwdev,
			    struct sss_ctrlq_cmd_info *info)
{
	struct sss_ctrl_msg_buf *in_buf = info->in_buf;
	struct sss_ctrl_msg_buf *out_buf = info->out_buf;

	/* Detach first, then drop the references. */
	info->in_buf = NULL;
	info->out_buf = NULL;

	if (in_buf)
		sss_free_ctrlq_msg_buf(hwdev, in_buf);

	if (out_buf)
		sss_free_ctrlq_msg_buf(hwdev, out_buf);
}
/* Retire the WQE at @ci: clear its control word (completion/ownership
 * bit), mark the slot free and advance the consumer index.
 */
static void sss_erase_wqe_complete_bit(struct sss_ctrlq *ctrlq,
				       struct sss_ctrlq_wqe *wqe, u16 ci)
{
	struct sss_wqe_ctrl *wqe_ctrl = NULL;
	u32 head = sss_hw_cpu32(SSS_WQE_HEAD(wqe)->info);
	enum sss_data_fmt format = SSS_GET_CTRLQ_WQE_HEAD(head, DATA_FMT);

	/* The ctrl word lives in a different offset per WQE layout. */
	wqe_ctrl = (format == SSS_DATA_SGE) ? &wqe->wqe_lcmd.ctrl :
		   &wqe->inline_wqe.wqe_scmd.ctrl;

	wqe_ctrl->info = 0;
	ctrlq->cmd_info[ci].msg_type = SSS_MSG_TYPE_NONE;

	/* write ctrlq wqe msg type */
	wmb();

	sss_update_wq_ci(&ctrlq->wq, SSS_WQEBB_NUM_FOR_CTRLQ);
}
/* Completion path: under ctrlq_lock, copy the reply and error code to
 * the waiter's locations, publish SSS_CTRLQ_COMPLETE_CODE and wake the
 * waiter.
 */
static void sss_ctrlq_update_cmd_info(struct sss_ctrlq *ctrlq,
				      struct sss_ctrlq_wqe *wqe, u16 ci)
{
	struct sss_ctrlq_cmd_info *info = &ctrlq->cmd_info[ci];
	struct sss_ctrlq_wqe_lcmd *lcmd = &wqe->wqe_lcmd;
	u32 status;

	spin_lock(&ctrlq->ctrlq_lock);

	if (info->direct_resp)
		*info->direct_resp =
			sss_hw_cpu32(lcmd->completion.direct_reply);

	if (info->err_code) {
		status = sss_hw_cpu32(lcmd->state.info);
		*info->err_code = SSS_GET_WQE_ERRCODE(status, VAL);
	}

	if (info->cmpt_code) {
		*info->cmpt_code = SSS_CTRLQ_COMPLETE_CODE;
		info->cmpt_code = NULL;
	}

	/* read all before set info done */
	/* NOTE(review): this pairs with the smp_rmb() after
	 * wait_for_completion in the submit paths - confirm a write
	 * barrier is not needed here instead.
	 */
	smp_rmb();

	if (info->done) {
		complete(info->done);
		info->done = NULL;
	}

	spin_unlock(&ctrlq->ctrlq_lock);
}
/* CEQ handler for SET_ARM WQEs (short-command layout): no buffers or
 * waiters, just retire the WQE once hardware marks it complete.
 */
static int sss_ctrlq_arm_ceq_handler(struct sss_ctrlq *ctrlq,
				     struct sss_ctrlq_wqe *wqe, u16 ci)
{
	struct sss_wqe_ctrl *ctrl = &wqe->inline_wqe.wqe_scmd.ctrl;
	u32 info = sss_hw_cpu32((ctrl)->info);

	if (!SSS_WQE_COMPLETE(info))
		return -EBUSY;

	sss_erase_wqe_complete_bit(ctrlq, wqe, ci);

	return 0;
}
/* CEQ handler for synchronous commands: wake the waiter, free the
 * command buffers and retire the WQE.  -EBUSY stops the reap loop when
 * the WQE has not completed yet.
 */
static int sss_ctrlq_default_handler(struct sss_ctrlq *ctrlq,
				     struct sss_ctrlq_wqe *wqe, u16 ci)
{
	struct sss_wqe_ctrl *ctrl = &wqe->wqe_lcmd.ctrl;
	u32 info = sss_hw_cpu32((ctrl)->info);

	if (!SSS_WQE_COMPLETE(info))
		return -EBUSY;

	/* Order the completion-bit read before reading the WQE payload. */
	dma_rmb();

	sss_ctrlq_update_cmd_info(ctrlq, wqe, ci);
	sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]);
	sss_erase_wqe_complete_bit(ctrlq, wqe, ci);

	return 0;
}
/* CEQ handler for async commands: no waiter to wake, just free the
 * buffers and retire the WQE once complete.
 */
static int sss_ctrlq_async_cmd_handler(struct sss_ctrlq *ctrlq,
				       struct sss_ctrlq_wqe *wqe, u16 ci)
{
	struct sss_wqe_ctrl *ctrl = &wqe->wqe_lcmd.ctrl;
	u32 info = sss_hw_cpu32((ctrl)->info);

	if (!SSS_WQE_COMPLETE(info))
		return -EBUSY;

	/* Order the completion-bit read before reading the WQE payload. */
	dma_rmb();

	sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]);
	sss_erase_wqe_complete_bit(ctrlq, wqe, ci);

	return 0;
}
/* CEQ handler for commands whose waiter already harvested the result
 * after a pseudo timeout: only cleanup remains.
 */
static int sss_ctrlq_pseudo_timeout_handler(struct sss_ctrlq *ctrlq,
					    struct sss_ctrlq_wqe *wqe, u16 ci)
{
	sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]);
	sss_erase_wqe_complete_bit(ctrlq, wqe, ci);

	return 0;
}
/* CEQ handler for timed-out commands: dump the head of the WQE (four
 * u32 words per log line) for diagnostics, then clean up.
 */
static int sss_ctrlq_timeout_handler(struct sss_ctrlq *ctrlq,
				     struct sss_ctrlq_wqe *wqe, u16 ci)
{
	u32 i;
	u32 *data = (u32 *)wqe;
	u32 num = SSS_CTRLQ_WQE_HEAD_LEN / sizeof(u32);

	sdk_warn(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq timeout, ci: %u\n", ci);

	for (i = 0; i < num; i += 0x4) {
		sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			 *(data + i), *(data + i + 0x1), *(data + i + 0x2),
			 *(data + i + 0x3));
	}

	sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]);
	sss_erase_wqe_complete_bit(ctrlq, wqe, ci);

	return 0;
}
/* Force-stopped commands are cleaned up exactly like async commands. */
static int sss_ctrlq_force_stop_handler(struct sss_ctrlq *ctrlq,
					struct sss_ctrlq_wqe *wqe, u16 ci)
{
	return sss_ctrlq_async_cmd_handler(ctrlq, wqe, ci);
}
/* CEQ callback: reap completed ctrlq WQEs, dispatching each to the
 * handler matching its cmd_info msg_type.  Stops as soon as a handler
 * reports -EBUSY (WQE not complete yet).  @dev is the hwdev; @data
 * carries the ctrlq type in its low bits.
 */
void sss_ctrlq_ceq_handler(void *dev, u32 data)
{
	u16 ci;
	int ret;
	enum sss_ctrlq_type type = SSS_GET_CEQE_CTRLQ(data, TYPE);
	struct sss_ctrlq *ctrlq = &SSS_TO_CTRLQ_INFO(dev)->ctrlq[type];
	struct sss_ctrlq_wqe *ctrlq_wqe = NULL;
	struct sss_ctrlq_cmd_info *info = NULL;
	/* Indexed by enum sss_msg_type; NULL entries stop the loop. */
	sss_ctrlq_type_handler_t handler[] = {
		NULL,
		sss_ctrlq_arm_ceq_handler,
		sss_ctrlq_default_handler,
		sss_ctrlq_default_handler,
		sss_ctrlq_async_cmd_handler,
		sss_ctrlq_pseudo_timeout_handler,
		sss_ctrlq_timeout_handler,
		sss_ctrlq_force_stop_handler,
	};

	while ((ctrlq_wqe = sss_ctrlq_read_wqe(&ctrlq->wq, &ci)) != NULL) {
		info = &ctrlq->cmd_info[ci];

		/* Unknown msg_type: fall back to the default handler. */
		if (info->msg_type < SSS_MSG_TYPE_NONE ||
		    info->msg_type >= SSS_MSG_TYPE_MAX) {
			ret = sss_ctrlq_default_handler(ctrlq, ctrlq_wqe, ci);
			if (ret)
				break;
			continue;
		}

		if (!handler[info->msg_type])
			break;

		ret = handler[info->msg_type](ctrlq, ctrlq_wqe, ci);
		if (ret)
			break;
	}
}

View File

@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

#ifndef SSS_HWIF_CTRLQ_H
#define SSS_HWIF_CTRLQ_H

#include "sss_hw_wq.h"

/* Max payload carried by one ctrlq message buffer. */
#define SSS_CTRLQ_BUF_LEN 2048U

/* Completion codes exchanged through cmd_info->cmpt_code. */
#define SSS_CTRLQ_SEND_CMPT_CODE 10
#define SSS_CTRLQ_FORCE_STOP_CMPT_CODE 12

/* Each ctrlq command occupies a single WQE basic block. */
#define SSS_WQEBB_NUM_FOR_CTRLQ 1

enum sss_ctrlq_state {
	SSS_CTRLQ_ENABLE = BIT(0),
};

/* Pop the next posted WQE, or NULL when the queue is empty. */
void *sss_ctrlq_read_wqe(struct sss_wq *wq, u16 *ci);
/* CEQ callback that reaps ctrlq completions. */
void sss_ctrlq_ceq_handler(void *handle, u32 ceqe_data);
/* Release the in/out message buffers attached to @cmd_info. */
void sss_free_ctrlq_cmd_buf(struct sss_hwdev *hwdev,
			    struct sss_ctrlq_cmd_info *cmd_info);
/* Synchronous command with a 64-bit direct reply. */
int sss_ctrlq_sync_cmd_direct_reply(struct sss_ctrlq *ctrlq, u8 mod,
				    u8 cmd, struct sss_ctrl_msg_buf *in_buf,
				    u64 *out_param, u32 timeout, u16 channel);
/* Synchronous command with a detailed (SGE) reply buffer. */
int sss_ctrlq_sync_cmd_detail_reply(struct sss_ctrlq *ctrlq, u8 mod, u8 cmd,
				    struct sss_ctrl_msg_buf *in_buf,
				    struct sss_ctrl_msg_buf *out_buf,
				    u64 *out_param, u32 timeout, u16 channel);
#endif

View File

@ -0,0 +1,170 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/module.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_hwif_api.h"
#include "sss_hwif_ctrlq.h"
#include "sss_common.h"
#define SSS_CTRLQ_ENABLE_TIMEOUT 300
/* Busy-wait (up to SSS_CTRLQ_ENABLE_TIMEOUT ms) for the ctrlq to become
 * enabled.  On timeout/chip-absent, latches disable_flag so later calls
 * fail immediately.  Returns 0 when enabled, -EBUSY otherwise.
 */
static int sss_wait_ctrlq_enable(struct sss_ctrlq_info *ctrlq_info)
{
	unsigned long end;

	end = jiffies + msecs_to_jiffies(SSS_CTRLQ_ENABLE_TIMEOUT);
	do {
		if (ctrlq_info->state & SSS_CTRLQ_ENABLE)
			return 0;
	} while (time_before(jiffies, end) &&
		 SSS_TO_HWDEV(ctrlq_info)->chip_present_flag &&
		 !ctrlq_info->disable_flag);

	ctrlq_info->disable_flag = 1;

	return -EBUSY;
}
/* Validate the common ctrlq submission parameters.
 * Returns 0 when both pointers are non-NULL and the buffer size is in
 * (0, SSS_CTRLQ_BUF_LEN]; -EINVAL otherwise.
 */
static int sss_check_ctrlq_param(const void *hwdev, const struct sss_ctrl_msg_buf *in_buf)
{
	int ret = 0;

	if (!hwdev || !in_buf) {
		pr_err("Invalid ctrlq param: hwdev: %p or in_buf: %p\n",
		       hwdev, in_buf);
		ret = -EINVAL;
	} else if (in_buf->size == 0 || in_buf->size > SSS_CTRLQ_BUF_LEN) {
		pr_err("Invalid ctrlq buf size: 0x%x\n", in_buf->size);
		ret = -EINVAL;
	}

	return ret;
}
/* Allocate a ctrlq message buffer (header + SSS_CTRLQ_BUF_LEN DMA
 * payload from the device's pci pool), ref count initialised to 1.
 * GFP_ATOMIC: callable from atomic context.  Returns NULL on failure.
 */
struct sss_ctrl_msg_buf *sss_alloc_ctrlq_msg_buf(void *hwdev)
{
	struct sss_ctrlq_info *ctrlq_info = NULL;
	struct sss_ctrl_msg_buf *msg_buf = NULL;
	void *dev = NULL;

	if (!hwdev) {
		pr_err("Alloc ctrlq msg buf: hwdev is NULL\n");
		return NULL;
	}

	ctrlq_info = ((struct sss_hwdev *)hwdev)->ctrlq_info;
	dev = ((struct sss_hwdev *)hwdev)->dev_hdl;

	msg_buf = kzalloc(sizeof(*msg_buf), GFP_ATOMIC);
	if (!msg_buf)
		return NULL;

	msg_buf->buf = pci_pool_alloc(ctrlq_info->msg_buf_pool, GFP_ATOMIC,
				      &msg_buf->dma_addr);
	if (!msg_buf->buf) {
		sdk_err(dev, "Fail to allocate ctrlq pci pool\n");
		goto alloc_pci_buf_err;
	}

	msg_buf->size = SSS_CTRLQ_BUF_LEN;
	atomic_set(&msg_buf->ref_cnt, 1);

	return msg_buf;

alloc_pci_buf_err:
	kfree(msg_buf);
	return NULL;
}
EXPORT_SYMBOL(sss_alloc_ctrlq_msg_buf);
void sss_free_ctrlq_msg_buf(void *hwdev, struct sss_ctrl_msg_buf *msg_buf)
{
struct sss_ctrlq_info *ctrlq_info = SSS_TO_CTRLQ_INFO(hwdev);
if (!hwdev || !msg_buf) {
pr_err("Invalid ctrlq param: hwdev: %p or msg_buf: %p\n", hwdev, msg_buf);
return;
}
if (atomic_dec_and_test(&msg_buf->ref_cnt) == 0)
return;
pci_pool_free(ctrlq_info->msg_buf_pool, msg_buf->buf, msg_buf->dma_addr);
kfree(msg_buf);
}
EXPORT_SYMBOL(sss_free_ctrlq_msg_buf);
/* Public entry: validate parameters and chip state, wait for the ctrlq
 * to be enabled, then issue a synchronous direct-reply command on the
 * SYNC queue.  Returns -ETIMEDOUT if the chip vanished during the call.
 */
int sss_ctrlq_direct_reply(void *hwdev, u8 mod, u8 cmd,
			   struct sss_ctrl_msg_buf *in_buf, u64 *out_param,
			   u32 timeout, u16 channel)
{
	int ret;
	struct sss_ctrlq_info *ctrlq_info = NULL;

	ret = sss_check_ctrlq_param(hwdev, in_buf);
	if (ret != 0) {
		pr_err("Invalid ctrlq parameters\n");
		return ret;
	}

	if (!sss_chip_get_present_state((struct sss_hwdev *)hwdev))
		return -EPERM;

	ctrlq_info = ((struct sss_hwdev *)hwdev)->ctrlq_info;
	ret = sss_wait_ctrlq_enable(ctrlq_info);
	if (ret != 0) {
		sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Ctrlq is disable\n");
		return ret;
	}

	ret = sss_ctrlq_sync_cmd_direct_reply(&ctrlq_info->ctrlq[SSS_CTRLQ_SYNC],
					      mod, cmd, in_buf, out_param, timeout, channel);

	/* Chip dropped off the bus mid-command: report a timeout. */
	if (!(((struct sss_hwdev *)hwdev)->chip_present_flag))
		return -ETIMEDOUT;
	else
		return ret;
}
EXPORT_SYMBOL(sss_ctrlq_direct_reply);
/* Public entry: like sss_ctrlq_direct_reply() but the detailed reply is
 * returned through @out_buf (SGE reply) in addition to @out_param.
 */
int sss_ctrlq_detail_reply(void *hwdev, u8 mod, u8 cmd,
			   struct sss_ctrl_msg_buf *in_buf, struct sss_ctrl_msg_buf *out_buf,
			   u64 *out_param, u32 timeout, u16 channel)
{
	int ret;
	struct sss_ctrlq_info *ctrlq_info = NULL;

	ret = sss_check_ctrlq_param(hwdev, in_buf);
	if (ret != 0)
		return ret;

	ctrlq_info = ((struct sss_hwdev *)hwdev)->ctrlq_info;

	if (!sss_chip_get_present_state((struct sss_hwdev *)hwdev))
		return -EPERM;

	ret = sss_wait_ctrlq_enable(ctrlq_info);
	if (ret != 0) {
		sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Ctrlq is disable\n");
		return ret;
	}

	ret = sss_ctrlq_sync_cmd_detail_reply(&ctrlq_info->ctrlq[SSS_CTRLQ_SYNC],
					      mod, cmd, in_buf, out_buf,
					      out_param, timeout, channel);

	/* Chip dropped off the bus mid-command: report a timeout. */
	if (!(((struct sss_hwdev *)hwdev)->chip_present_flag))
		return -ETIMEDOUT;
	else
		return ret;
}
EXPORT_SYMBOL(sss_ctrlq_detail_reply);

View File

@ -0,0 +1,597 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_hwif_export.h"
#include "sss_hwif_ceq.h"
#include "sss_hwif_api.h"
#include "sss_hwif_ctrlq.h"
#include "sss_common.h"
/* Ctrlq sizing and context-register layout. */
#define SSS_CTRLQ_DEPTH			4096

#define SSS_CTRLQ_PFN_SHIFT		12
#define SSS_CTRLQ_PFN(addr)		((addr) >> SSS_CTRLQ_PFN_SHIFT)

#define SSS_CTRLQ_CEQ_ID		0

#define SSS_CTRLQ_WQ_CLA_SIZE		512

#define SSS_CTRLQ_WQEBB_SIZE		64

#define SSS_CTRLQ_IDLE_TIMEOUT		5000

/* curr_wqe_page_pfn context word bit layout. */
#define SSS_CTRLQ_CTX_NOW_WQE_PAGE_PFN_SHIFT	0
#define SSS_CTRLQ_CTX_CEQ_ID_SHIFT		53
#define SSS_CTRLQ_CTX_CEQ_ARM_SHIFT		61
#define SSS_CTRLQ_CTX_CEQ_EN_SHIFT		62
#define SSS_CTRLQ_CTX_HW_BUSY_BIT_SHIFT		63

#define SSS_CTRLQ_CTX_NOW_WQE_PAGE_PFN_MASK	0xFFFFFFFFFFFFF
#define SSS_CTRLQ_CTX_CEQ_ID_MASK		0xFF
#define SSS_CTRLQ_CTX_CEQ_ARM_MASK		0x1
#define SSS_CTRLQ_CTX_CEQ_EN_MASK		0x1
#define SSS_CTRLQ_CTX_HW_BUSY_BIT_MASK		0x1

#define SSS_SET_CTRLQ_CTX_INFO(val, member) \
	(((u64)(val) & SSS_CTRLQ_CTX_##member##_MASK) \
	 << SSS_CTRLQ_CTX_##member##_SHIFT)

/* wq_block_pfn context word bit layout. */
#define SSS_CTRLQ_CTX_WQ_BLOCK_PFN_SHIFT	0
#define SSS_CTRLQ_CTX_CI_SHIFT			52

#define SSS_CTRLQ_CTX_WQ_BLOCK_PFN_MASK		0xFFFFFFFFFFFFF
#define SSS_CTRLQ_CTX_CI_MASK			0xFFF

#define SSS_SET_CTRLQ_CTX_BLOCK_INFO(val, member) \
	(((u64)(val) & SSS_CTRLQ_CTX_##member##_MASK) \
	 << SSS_CTRLQ_CTX_##member##_SHIFT)

/* Number of WQ page entries that fit one queue's 512B CLA slot. */
#define SSS_CTRLQ_CLA_WQ_PAGE_NUM	(SSS_CTRLQ_WQ_CLA_SIZE / sizeof(u64))

#define SSS_GET_WQ_PAGE_SIZE(page_order)	(SSS_HW_WQ_PAGE_SIZE * (1U << (page_order)))

#define SSS_CTRLQ_DMA_POOL_NAME		"sss_ctrlq"

#define SSS_CTRLQ_WRAP_ENABLE		1

#define SSS_SET_WQE_PAGE_PFN(pfn) \
	(SSS_SET_CTRLQ_CTX_INFO(1, CEQ_ARM) | \
	 SSS_SET_CTRLQ_CTX_INFO(1, CEQ_EN) | \
	 SSS_SET_CTRLQ_CTX_INFO((pfn), NOW_WQE_PAGE_PFN) | \
	 SSS_SET_CTRLQ_CTX_INFO(SSS_CTRLQ_CEQ_ID, CEQ_ID) | \
	 SSS_SET_CTRLQ_CTX_INFO(1, HW_BUSY_BIT))

#define SSS_SET_WQ_BLOCK_PFN(wq, pfn) \
	(SSS_SET_CTRLQ_CTX_BLOCK_INFO((pfn), WQ_BLOCK_PFN) | \
	 SSS_SET_CTRLQ_CTX_BLOCK_INFO((u16)(wq)->ci, CI))

/* Module parameter: WQ page order. */
static u32 wq_page_num = SSS_MAX_WQ_PAGE_NUM;
module_param(wq_page_num, uint, 0444);
MODULE_PARM_DESC(wq_page_num,
		 "Set wq page num, wq page size is 4K * (2 ^ wq_page_num) - default is 8");
/* For 1-level CLA WQs, allocate the shared WQ block page and copy each
 * queue's WQ page address table into its 512B slot.  0-level CLA needs
 * no block.  Returns 0 on success, -EINVAL/-ENOMEM on failure.
 * Fix: the out-of-range log now reports the offending page count as
 * well as the limit (it previously printed only the limit).
 */
static int sss_init_ctrq_block(struct sss_ctrlq_info *ctrlq_info)
{
	u8 i;

	if (SSS_WQ_IS_0_LEVEL_CLA(&ctrlq_info->ctrlq[SSS_CTRLQ_SYNC].wq))
		return 0;

	/* ctrlq wq's CLA table is up to 512B */
	if (ctrlq_info->ctrlq[SSS_CTRLQ_SYNC].wq.page_num > SSS_CTRLQ_CLA_WQ_PAGE_NUM) {
		sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl,
			"Ctrlq wq page num %lu out of range: %lu\n",
			(unsigned long)ctrlq_info->ctrlq[SSS_CTRLQ_SYNC].wq.page_num,
			(unsigned long)SSS_CTRLQ_CLA_WQ_PAGE_NUM);
		return -EINVAL;
	}

	ctrlq_info->wq_block_vaddr =
		dma_zalloc_coherent(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, PAGE_SIZE,
				    &ctrlq_info->wq_block_paddr, GFP_KERNEL);
	if (!ctrlq_info->wq_block_vaddr) {
		sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Fail to alloc ctrlq wq block\n");
		return -ENOMEM;
	}

	for (i = 0; i < ctrlq_info->num; i++)
		memcpy((u8 *)ctrlq_info->wq_block_vaddr + SSS_CTRLQ_WQ_CLA_SIZE * i,
		       ctrlq_info->ctrlq[i].wq.block_vaddr,
		       ctrlq_info->ctrlq[i].wq.page_num * sizeof(u64));

	return 0;
}
/* Free the shared WQ block page, if one was allocated. */
static void sss_deinit_ctrq_block(struct sss_ctrlq_info *ctrlq_info)
{
	void *vaddr = ctrlq_info->wq_block_vaddr;

	if (!vaddr)
		return;

	dma_free_coherent(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, PAGE_SIZE,
			  vaddr, ctrlq_info->wq_block_paddr);
	ctrlq_info->wq_block_vaddr = NULL;
}
/* Create one work queue per ctrlq, then (for 1-level CLA) gather their
 * page tables into the shared WQ block.  Rolls back created WQs on
 * failure.
 */
static int sss_create_ctrlq_wq(struct sss_ctrlq_info *ctrlq_info)
{
	u8 i;
	int ret;
	u8 q_type;

	for (q_type = 0; q_type < ctrlq_info->num; q_type++) {
		ret = sss_create_wq(SSS_TO_HWDEV(ctrlq_info), &ctrlq_info->ctrlq[q_type].wq,
				    SSS_CTRLQ_DEPTH, SSS_CTRLQ_WQEBB_SIZE);
		if (ret != 0) {
			sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Fail to create ctrlq wq\n");
			goto destroy_wq;
		}
	}

	/* 1-level CLA must put all ctrlq's wq page addr in one wq block */
	ret = sss_init_ctrq_block(ctrlq_info);
	if (ret != 0)
		goto destroy_wq;

	return 0;

destroy_wq:
	/* q_type is the count of successfully created WQs. */
	for (i = 0; i < q_type; i++)
		sss_destroy_wq(&ctrlq_info->ctrlq[i].wq);
	sss_deinit_ctrq_block(ctrlq_info);

	return ret;
}
/* Tear down the shared WQ block, then every per-queue work queue. */
static void sss_destroy_ctrlq_wq(struct sss_ctrlq_info *ctrlq_info)
{
	u8 i = 0;

	sss_deinit_ctrq_block(ctrlq_info);

	while (i < ctrlq_info->num) {
		sss_destroy_wq(&ctrlq_info->ctrlq[i].wq);
		i++;
	}
}
/* Allocate the per-WQE cmd_info array and fill the hardware context
 * words for this ctrlq (current WQE page PFN and WQ block PFN).
 */
static int sss_init_ctrlq_info(struct sss_ctrlq *ctrlq,
			       struct sss_ctrlq_ctxt_info *ctx,
			       dma_addr_t wq_block_paddr)
{
	struct sss_wq *wq = &ctrlq->wq;
	u64 pfn = SSS_CTRLQ_PFN(wq->page[0].align_paddr);

	ctrlq->cmd_info = kcalloc(ctrlq->wq.q_depth, sizeof(*ctrlq->cmd_info),
				  GFP_KERNEL);
	if (!ctrlq->cmd_info)
		return -ENOMEM;

	ctrlq->wrapped = SSS_CTRLQ_WRAP_ENABLE;
	spin_lock_init(&ctrlq->ctrlq_lock);

	ctx->curr_wqe_page_pfn = SSS_SET_WQE_PAGE_PFN(pfn);
	/* 0-level CLA uses the first WQ page directly; 1-level points at
	 * the shared WQ block.
	 */
	pfn = SSS_WQ_IS_0_LEVEL_CLA(wq) ? pfn : SSS_CTRLQ_PFN(wq_block_paddr);
	ctx->wq_block_pfn = SSS_SET_WQ_BLOCK_PFN(wq, pfn);

	return 0;
}
static void sss_deinit_ctrlq_info(struct sss_ctrlq *ctrlq)
{
kfree(ctrlq->cmd_info);
}
/* Force-complete one pending synchronous command: mark it force-stopped,
 * flip its cmpt_code so the waiter sees FORCE_STOP, and wake it.  Async
 * and idle slots are left untouched.
 */
static void sss_flush_ctrlq_sync_cmd(struct sss_ctrlq_cmd_info *info)
{
	if (info->msg_type != SSS_MSG_TYPE_DIRECT_RESP &&
	    info->msg_type != SSS_MSG_TYPE_SGE_RESP)
		return;

	info->msg_type = SSS_MSG_TYPE_FORCE_STOP;

	if (info->cmpt_code && *info->cmpt_code == SSS_CTRLQ_SEND_CMPT_CODE)
		*info->cmpt_code = SSS_CTRLQ_FORCE_STOP_CMPT_CODE;

	if (info->done) {
		complete(info->done);
		info->cmpt_code = NULL;
		info->direct_resp = NULL;
		info->err_code = NULL;
		info->done = NULL;
	}
}
/* Under ctrlq_lock, walk all outstanding WQEs, retiring each and
 * force-completing any waiting synchronous command.
 */
static void sss_flush_ctrlq_cmd(struct sss_ctrlq *ctrlq)
{
	u16 ci = 0;

	spin_lock_bh(&ctrlq->ctrlq_lock);
	while (sss_ctrlq_read_wqe(&ctrlq->wq, &ci)) {
		sss_update_wq_ci(&ctrlq->wq, SSS_WQEBB_NUM_FOR_CTRLQ);
		sss_flush_ctrlq_sync_cmd(&ctrlq->cmd_info[ci]);
	}
	spin_unlock_bh(&ctrlq->ctrlq_lock);
}
/* Release the message buffers held by every cmd_info slot. */
static void sss_free_all_ctrlq_cmd_buff(struct sss_ctrlq *ctrlq)
{
	u16 idx;

	for (idx = 0; idx < ctrlq->wq.q_depth; idx++)
		sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq),
				       &ctrlq->cmd_info[idx]);
}
/* Push one ctrlq's context (WQE/WQ-block PFNs) to the management CPU
 * via the SET_CTRLQ_CTXT command.  Returns 0 on success, -EFAULT on a
 * failed or rejected message.
 */
static int sss_chip_set_ctrlq_ctx(struct sss_hwdev *hwdev, u8 qid,
				  struct sss_ctrlq_ctxt_info *ctxt)
{
	int ret;
	struct sss_cmd_ctrlq_ctxt cmd_ctx = {0};
	u16 out_len = sizeof(cmd_ctx);

	memcpy(&cmd_ctx.ctxt, ctxt, sizeof(*ctxt));
	cmd_ctx.ctrlq_id = qid;
	cmd_ctx.func_id = sss_get_global_func_id(hwdev);

	ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_CTRLQ_CTXT,
				&cmd_ctx, sizeof(cmd_ctx), &cmd_ctx, &out_len);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ctx)) {
		sdk_err(hwdev->dev_hdl,
			"Fail to set ctrlq ctx, ret: %d, status: 0x%x, out_len: 0x%x\n",
			ret, cmd_ctx.head.state, out_len);
		return -EFAULT;
	}

	return 0;
}
/* Program the context of every ctrlq into the chip, then mark the
 * ctrlq subsystem enabled.
 */
static int sss_init_ctrlq_ctx(struct sss_hwdev *hwdev)
{
	u8 q_type;
	int ret;
	struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info;

	for (q_type = 0; q_type < ctrlq_info->num; q_type++) {
		ret = sss_chip_set_ctrlq_ctx(hwdev, q_type, &ctrlq_info->ctrlq[q_type].ctrlq_ctxt);
		if (ret != 0)
			return ret;
	}

	ctrlq_info->disable_flag = 0;
	ctrlq_info->state |= SSS_CTRLQ_ENABLE;

	return 0;
}
/* Recover the ctrlq after a device reset: abort/flush every pending
 * command, drop held buffers, reset each WQ to its initial wrapped
 * state, then reprogram the chip contexts.
 */
int sss_reinit_ctrlq_ctx(struct sss_hwdev *hwdev)
{
	u8 ctrlq_type;
	struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info;

	for (ctrlq_type = 0; ctrlq_type < ctrlq_info->num; ctrlq_type++) {
		sss_flush_ctrlq_cmd(&ctrlq_info->ctrlq[ctrlq_type]);
		sss_free_all_ctrlq_cmd_buff(&ctrlq_info->ctrlq[ctrlq_type]);
		ctrlq_info->ctrlq[ctrlq_type].wrapped = 1;
		sss_wq_reset(&ctrlq_info->ctrlq[ctrlq_type].wq);
	}

	return sss_init_ctrlq_ctx(hwdev);
}
/* Full ctrlq bring-up: allocate the info struct, DMA message-buffer
 * pool, per-queue WQs, a doorbell page and per-queue state, then push
 * the contexts to the chip.  Unwinds each stage in reverse on failure.
 */
static int sss_init_ctrlq(struct sss_hwdev *hwdev)
{
	u8 i;
	u8 q_type;
	int ret = -ENOMEM;
	struct sss_ctrlq_info *ctrlq_info = NULL;

	ctrlq_info = kzalloc(sizeof(*ctrlq_info), GFP_KERNEL);
	if (!ctrlq_info)
		return -ENOMEM;

	ctrlq_info->hwdev = hwdev;
	hwdev->ctrlq_info = ctrlq_info;

	/* Use the chip-reported queue count when available, capped at
	 * SSS_MAX_CTRLQ_TYPE.
	 */
	if (SSS_SUPPORT_CTRLQ_NUM(hwdev)) {
		ctrlq_info->num = hwdev->glb_attr.ctrlq_num;
		if (hwdev->glb_attr.ctrlq_num > SSS_MAX_CTRLQ_TYPE) {
			sdk_warn(hwdev->dev_hdl, "Adjust ctrlq num to %d\n", SSS_MAX_CTRLQ_TYPE);
			ctrlq_info->num = SSS_MAX_CTRLQ_TYPE;
		}
	} else {
		ctrlq_info->num = SSS_MAX_CTRLQ_TYPE;
	}

	ctrlq_info->msg_buf_pool = dma_pool_create(SSS_CTRLQ_DMA_POOL_NAME, hwdev->dev_hdl,
						   SSS_CTRLQ_BUF_LEN, SSS_CTRLQ_BUF_LEN, 0ULL);
	if (!ctrlq_info->msg_buf_pool) {
		sdk_err(hwdev->dev_hdl, "Fail to create ctrlq buffer pool\n");
		goto create_pool_err;
	}

	ret = sss_create_ctrlq_wq(ctrlq_info);
	if (ret != 0)
		goto create_wq_err;

	ret = sss_alloc_db_addr(hwdev, (void __iomem *)&ctrlq_info->db_base);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to alloc doorbell addr\n");
		goto init_db_err;
	}

	for (q_type = 0; q_type < ctrlq_info->num; q_type++) {
		ctrlq_info->ctrlq[q_type].hwdev = hwdev;
		ctrlq_info->ctrlq[q_type].ctrlq_type = q_type;
		ret = sss_init_ctrlq_info(&ctrlq_info->ctrlq[q_type],
					  &ctrlq_info->ctrlq[q_type].ctrlq_ctxt,
					  ctrlq_info->wq_block_paddr);
		if (ret != 0) {
			sdk_err(hwdev->dev_hdl, "Fail to init ctrlq i :%d\n", q_type);
			goto init_ctrlq_info_err;
		}
	}

	ret = sss_init_ctrlq_ctx(hwdev);
	if (ret != 0)
		goto init_ctrlq_info_err;

	return 0;

init_ctrlq_info_err:
	/* q_type is the count of successfully initialised queues. */
	for (i = 0; i < q_type; i++)
		sss_deinit_ctrlq_info(&ctrlq_info->ctrlq[i]);

	sss_free_db_addr(hwdev, ctrlq_info->db_base);
init_db_err:
	sss_destroy_ctrlq_wq(ctrlq_info);
create_wq_err:
	dma_pool_destroy(ctrlq_info->msg_buf_pool);
create_pool_err:
	kfree(ctrlq_info);
	hwdev->ctrlq_info = NULL;

	return ret;
}
/* Tear down the ctrlq subsystem: disable it, flush and free every queue,
 * then release the doorbell, the work queues, the DMA pool and the info
 * container (reverse order of sss_init_ctrlq()).
 */
void sss_deinit_ctrlq(struct sss_hwdev *hwdev)
{
u8 i;
struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info;
ctrlq_info->state &= ~SSS_CTRLQ_ENABLE;
for (i = 0; i < ctrlq_info->num; i++) {
sss_flush_ctrlq_cmd(&ctrlq_info->ctrlq[i]);
sss_free_all_ctrlq_cmd_buff(&ctrlq_info->ctrlq[i]);
sss_deinit_ctrlq_info(&ctrlq_info->ctrlq[i]);
}
sss_free_db_addr(hwdev, ctrlq_info->db_base);
sss_destroy_ctrlq_wq(ctrlq_info);
dma_pool_destroy(ctrlq_info->msg_buf_pool);
kfree(ctrlq_info);
hwdev->ctrlq_info = NULL;
}
/* Tell the firmware the ctrlq depth (as log2 of SSS_CTRLQ_DEPTH) through
 * the root context command.
 *
 * Return: 0 on success, -EFAULT on send failure or bad firmware status.
 */
static int sss_set_ctrlq_depth(void *hwdev)
{
int ret;
struct sss_cmd_root_ctxt cmd_ctx = {0};
u16 out_len = sizeof(cmd_ctx);
cmd_ctx.set_ctrlq_depth = 1;
/* hardware expects the depth encoded as a power-of-two exponent */
cmd_ctx.ctrlq_depth = (u8)ilog2(SSS_CTRLQ_DEPTH);
cmd_ctx.func_id = sss_get_global_func_id(hwdev);
ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_VAT, &cmd_ctx,
sizeof(cmd_ctx), &cmd_ctx, &out_len);
if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ctx)) {
sdk_err(SSS_TO_DEV(hwdev),
"Fail to set ctrlq depth, ret: %d, status: 0x%x, out_len: 0x%x\n",
ret, cmd_ctx.head.state, out_len);
return -EFAULT;
}
return 0;
}
/* Bring up the ctrlq path: init the queues, register the CEQ callback,
 * publish the queue depth to firmware and mark the ctrlq initialized in
 * the hwdev function state.
 */
static int sss_hwif_init_ctrlq(struct sss_hwdev *hwdev)
{
int ret;
ret = sss_init_ctrlq(hwdev);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to init ctrlq\n");
return ret;
}
/* completions for ctrlq commands arrive on the SSS_NIC_CTRLQ ceq */
sss_ceq_register_cb(hwdev, hwdev, SSS_NIC_CTRLQ, sss_ctrlq_ceq_handler);
ret = sss_set_ctrlq_depth(hwdev);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to set ctrlq depth\n");
goto set_depth_err;
}
set_bit(SSS_HW_CTRLQ_INIT_OK, &hwdev->func_state);
return 0;
set_depth_err:
sss_deinit_ctrlq(hwdev);
return ret;
}
/* Reverse of sss_hwif_init_ctrlq(): clear the init flag (under the
 * channel lock so concurrent senders observe it), drop the CEQ callback
 * and free the queues.
 */
static void sss_hwif_deinit_ctrlq(struct sss_hwdev *hwdev)
{
spin_lock_bh(&hwdev->channel_lock);
clear_bit(SSS_HW_CTRLQ_INIT_OK, &hwdev->func_state);
spin_unlock_bh(&hwdev->channel_lock);
sss_ceq_unregister_cb(hwdev, SSS_NIC_CTRLQ);
sss_deinit_ctrlq(hwdev);
}
/* A ctrlq is idle when its work queue has no outstanding WQEs. */
static bool sss_ctrlq_is_idle(struct sss_ctrlq *ctrlq)
{
return sss_wq_is_empty(&ctrlq->wq);
}
static enum sss_process_ret sss_check_ctrlq_stop_handler(void *priv_data)
{
struct sss_hwdev *hwdev = priv_data;
struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info;
enum sss_ctrlq_type ctrlq_type;
/* Stop waiting when card unpresent */
if (!hwdev->chip_present_flag)
return SSS_PROCESS_OK;
for (ctrlq_type = 0; ctrlq_type < ctrlq_info->num; ctrlq_type++) {
if (!sss_ctrlq_is_idle(&ctrlq_info->ctrlq[ctrlq_type]))
return SSS_PROCESS_DOING;
}
return SSS_PROCESS_OK;
}
/* Clamp the module parameter wq_page_num to the supported maximum,
 * derive the work-queue page size from it and program that size into
 * the chip for this function.
 */
static int sss_init_ctrlq_page_size(struct sss_hwdev *hwdev)
{
int ret;
if (wq_page_num > SSS_MAX_WQ_PAGE_NUM) {
sdk_info(hwdev->dev_hdl,
"Invalid wq_page_num %u out of range, adjust to %d\n",
wq_page_num, SSS_MAX_WQ_PAGE_NUM);
wq_page_num = SSS_MAX_WQ_PAGE_NUM;
}
hwdev->wq_page_size = SSS_GET_WQ_PAGE_SIZE(wq_page_num);
ret = sss_chip_set_wq_page_size(hwdev, sss_get_global_func_id(hwdev),
hwdev->wq_page_size);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to set wq page size\n");
return ret;
}
return 0;
}
/* Restore the default HW work-queue page size.  Skipped for VFs —
 * only PF-type functions reset the page size here.
 */
static void sss_deinit_ctrlq_page_size(struct sss_hwdev *hwdev)
{
if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF)
sss_chip_set_wq_page_size(hwdev, sss_get_global_func_id(hwdev),
SSS_HW_WQ_PAGE_SIZE);
}
/* Initialize the full ctrlq channel: CEQs first (ctrlq completions ride
 * on them), then their MSI-X attributes, the WQ page size and finally
 * the ctrlqs themselves.  Unwinds in reverse order on failure.
 */
int sss_init_ctrlq_channel(struct sss_hwdev *hwdev)
{
int ret;
ret = sss_hwif_init_ceq(hwdev);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to init hwdev ceq.\n");
return ret;
}
ret = sss_init_ceq_msix_attr(hwdev);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to init ceq msix attr\n");
goto init_msix_err;
}
ret = sss_init_ctrlq_page_size(hwdev);
if (ret != 0)
goto init_size_err;
ret = sss_hwif_init_ctrlq(hwdev);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to init hwif ctrlq\n");
goto init_ctrlq_err;
}
return 0;
init_ctrlq_err:
sss_deinit_ctrlq_page_size(hwdev);
init_size_err:
/* msix attr needs no explicit undo; fall through to ceq teardown */
init_msix_err:
sss_hwif_deinit_ceq(hwdev);
return ret;
}
/* Tear down the ctrlq channel in reverse order of
 * sss_init_ctrlq_channel(): ctrlqs, WQ page size, then CEQs.
 */
void sss_deinit_ctrlq_channel(struct sss_hwdev *hwdev)
{
sss_hwif_deinit_ctrlq(hwdev);
sss_deinit_ctrlq_page_size(hwdev);
sss_hwif_deinit_ceq(hwdev);
}
/* Flush every command currently posted on the SYNC ctrlq without
 * consuming the WQEs: walk the [ci, pi) window and flush each slot's
 * command info.  Runs under the ctrlq lock.
 */
void sss_ctrlq_flush_sync_cmd(struct sss_hwdev *hwdev)
{
u16 cnt;
u16 ci;
u16 i;
u16 id;
struct sss_wq *wq = NULL;
struct sss_ctrlq *ctrlq = NULL;
struct sss_ctrlq_cmd_info *info = NULL;
ctrlq = &hwdev->ctrlq_info->ctrlq[SSS_CTRLQ_SYNC];
spin_lock_bh(&ctrlq->ctrlq_lock);
wq = &ctrlq->wq;
/* outstanding entry count, computed modulo the queue depth via
 * SSS_WQ_MASK_ID (q_depth added to keep the u16 subtraction positive)
 */
id = wq->pi + wq->q_depth - wq->ci;
cnt = (u16)SSS_WQ_MASK_ID(wq, id);
ci = wq->ci;
for (i = 0; i < cnt; i++) {
info = &ctrlq->cmd_info[SSS_WQ_MASK_ID(wq, ci + i)];
sss_flush_ctrlq_sync_cmd(info);
}
spin_unlock_bh(&ctrlq->ctrlq_lock);
}
/* Disable new ctrlq submissions and wait (up to SSS_CTRLQ_IDLE_TIMEOUT)
 * for all queues to drain.  On timeout the busy queues are logged and the
 * enable bit is restored so the caller can keep using the queues.
 *
 * Return: 0 when all queues went idle, the timeout error otherwise.
 */
int sss_wait_ctrlq_stop(struct sss_hwdev *hwdev)
{
enum sss_ctrlq_type ctrlq_type;
struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info;
int ret;
if (!(ctrlq_info->state & SSS_CTRLQ_ENABLE))
return 0;
ctrlq_info->state &= ~SSS_CTRLQ_ENABLE;
ret = sss_check_handler_timeout(hwdev, sss_check_ctrlq_stop_handler,
SSS_CTRLQ_IDLE_TIMEOUT, USEC_PER_MSEC);
if (ret == 0)
return 0;
for (ctrlq_type = 0; ctrlq_type < ctrlq_info->num; ctrlq_type++) {
if (!sss_ctrlq_is_idle(&ctrlq_info->ctrlq[ctrlq_type]))
sdk_err(hwdev->dev_hdl, "Ctrlq %d is busy\n", ctrlq_type);
}
/* re-enable so subsequent commands are not silently rejected */
ctrlq_info->state |= SSS_CTRLQ_ENABLE;
return ret;
}

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWIF_CTRLQ_INIT_H
#define SSS_HWIF_CTRLQ_INIT_H
#include "sss_hwdev.h"
int sss_init_ctrlq_channel(struct sss_hwdev *hwdev);
void sss_deinit_ctrlq_channel(struct sss_hwdev *hwdev);
int sss_reinit_ctrlq_ctx(struct sss_hwdev *hwdev);
int sss_wait_ctrlq_stop(struct sss_hwdev *hwdev);
void sss_ctrlq_flush_sync_cmd(struct sss_hwdev *hwdev);
#endif

View File

@ -0,0 +1,355 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_common.h"
#include "sss_hwdev.h"
#include "sss_hwif_api.h"
#include "sss_csr.h"
#include "sss_hwif_eq.h"
#define SSS_EQ_CI_SIMPLE_INDIR_CI_SHIFT 0
#define SSS_EQ_CI_SIMPLE_INDIR_ARMED_SHIFT 21
#define SSS_EQ_CI_SIMPLE_INDIR_AEQ_ID_SHIFT 30
#define SSS_EQ_CI_SIMPLE_INDIR_CEQ_ID_SHIFT 24
#define SSS_EQ_CI_SIMPLE_INDIR_CI_MASK 0x1FFFFFU
#define SSS_EQ_CI_SIMPLE_INDIR_ARMED_MASK 0x1U
#define SSS_EQ_CI_SIMPLE_INDIR_AEQ_ID_MASK 0x3U
#define SSS_EQ_CI_SIMPLE_INDIR_CEQ_ID_MASK 0xFFU
#define SSS_SET_EQ_CI_SIMPLE_INDIR(val, member) \
(((val) & SSS_EQ_CI_SIMPLE_INDIR_##member##_MASK) << \
SSS_EQ_CI_SIMPLE_INDIR_##member##_SHIFT)
#define SSS_EQ_WRAPPED_SHIFT 20
#define SSS_EQ_CI(eq) ((eq)->ci | \
((u32)(eq)->wrap << SSS_EQ_WRAPPED_SHIFT))
#define SSS_EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \
(((eq)->type == SSS_AEQ) ? \
SSS_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \
SSS_CSR_CEQ_CI_SIMPLE_INDIR_ADDR)
#define SSS_EQ_HI_PHYS_ADDR_REG(type, pg_num) \
((u32)((type == SSS_AEQ) ? \
SSS_AEQ_PHY_HI_ADDR_REG(pg_num) : \
SSS_CEQ_PHY_HI_ADDR_REG(pg_num)))
#define SSS_EQ_LO_PHYS_ADDR_REG(type, pg_num) \
((u32)((type == SSS_AEQ) ? \
SSS_AEQ_PHY_LO_ADDR_REG(pg_num) : \
SSS_CEQ_PHY_LO_ADDR_REG(pg_num)))
#define SSS_GET_EQ_PAGES_NUM(eq, size) \
((u16)(ALIGN((u32)((eq)->len * (eq)->entry_size), \
(size)) / (size)))
#define SSS_GET_EQ_MAX_PAGES(eq) \
((eq)->type == SSS_AEQ ? SSS_AEQ_MAX_PAGE : \
SSS_CEQ_MAX_PAGE)
#define SSS_GET_EQE_NUM(eq, pg_size) ((pg_size) / (u32)(eq)->entry_size)
#define SSS_EQE_NUM_IS_ALIGN(eq) ((eq)->num_entry_per_pg & ((eq)->num_entry_per_pg - 1))
/* Write the EQ consumer index (with wrap bit) and arm state to the
 * chip through the simple-indirect CI register.  In poll mode only
 * queue 0 may stay armed.
 */
void sss_chip_set_eq_ci(struct sss_eq *eq, u32 arm_state)
{
u32 val;
if (eq->qid != 0 && SSS_TO_HWDEV(eq)->poll)
arm_state = SSS_EQ_NOT_ARMED;
val = SSS_SET_EQ_CI_SIMPLE_INDIR(arm_state, ARMED) |
SSS_SET_EQ_CI_SIMPLE_INDIR(SSS_EQ_CI(eq), CI);
/* the queue id lives in a different field for AEQs vs CEQs */
if (eq->type == SSS_AEQ)
val |= SSS_SET_EQ_CI_SIMPLE_INDIR(eq->qid, AEQ_ID);
else
val |= SSS_SET_EQ_CI_SIMPLE_INDIR(eq->qid, CEQ_ID);
sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, SSS_EQ_CI_SIMPLE_INDIR_REG_ADDR(eq), val);
}
/*
 * Program one EQ page's DMA address into the chip, high dword then low
 * dword, each through its own per-page CSR.
 */
static void sss_chip_set_eq_page_addr(struct sss_eq *eq,
				      u16 page_id, struct sss_dma_addr_align *dma_addr)
{
	struct sss_hwif *hwif = SSS_TO_HWDEV(eq)->hwif;
	u64 paddr = dma_addr->align_paddr;

	sss_chip_write_reg(hwif, SSS_EQ_HI_PHYS_ADDR_REG(eq->type, page_id),
			   upper_32_bits(paddr));
	sss_chip_write_reg(hwif, SSS_EQ_LO_PHYS_ADDR_REG(eq->type, page_id),
			   lower_32_bits(paddr));
}
/* Program all EQ page addresses, run the type-specific attribute init
 * callback, then arm the queue by writing its initial CI.
 */
static int sss_chip_init_eq_attr(struct sss_eq *eq)
{
u32 i;
int ret;
for (i = 0; i < eq->page_num; i++)
sss_chip_set_eq_page_addr(eq, i, &eq->page_array[i]);
/* AEQ/CEQ-specific chip setup supplied by the owner of this eq */
ret = eq->init_attr_handler(eq);
if (ret != 0)
return ret;
sss_chip_set_eq_ci(eq, SSS_EQ_ARMED);
return 0;
}
/*
 * Compute the per-page entry count and let the EQ owner fill in its
 * element descriptors.
 *
 * Return: 0 on success, -EINVAL when the entries-per-page count is not a
 * power of two (SSS_EQE_NUM_IS_ALIGN() is non-zero in that case; the
 * element-indexing macros rely on mask arithmetic).
 *
 * Fix: the return type was u32 although the function returns the
 * negative errno -EINVAL; as a static helper whose only caller stores
 * the result in an int, it is declared int like every sibling helper.
 */
static int sss_init_eqe_desc(struct sss_eq *eq)
{
	eq->num_entry_per_pg = SSS_GET_EQE_NUM(eq, eq->page_size);
	if (SSS_EQE_NUM_IS_ALIGN(eq)) {
		sdk_err(SSS_TO_HWDEV(eq)->dev_hdl, "Number element in eq page is not align\n");
		return -EINVAL;
	}
	eq->init_desc_handler(eq);
	return 0;
}
/* Allocate and map one aligned, zeroed DMA page for the event queue. */
static int sss_alloc_eq_dma_page(struct sss_eq *eq, u16 id)
{
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(eq);
	int err;

	err = sss_dma_zalloc_coherent_align(hwdev->dev_hdl, eq->page_size,
					    SSS_MIN_EQ_PAGE_SIZE, GFP_KERNEL,
					    &eq->page_array[id]);
	if (err != 0)
		sdk_err(hwdev->dev_hdl, "Alloc eq page fail, pg index: %hu\n", id);

	return err;
}
/* Unmap and free the EQ DMA pages with indices [0, max_id). */
static void sss_free_eq_dma_page(struct sss_eq *eq, u16 max_id)
{
	u16 id;

	for (id = 0; id < max_id; id++)
		sss_dma_free_coherent_align(SSS_TO_DEV(eq->hwdev),
					    &eq->page_array[id]);
}
/* Allocate the EQ page table and every DMA page in it, then lay out the
 * element descriptors.  On failure, frees the pages allocated so far
 * (page_id is the index of the first page that failed).
 */
static int sss_alloc_eq_page(struct sss_eq *eq)
{
u16 page_id;
int ret;
struct sss_hwdev *hwdev = SSS_TO_HWDEV(eq);
eq->page_array = kcalloc(eq->page_num, sizeof(*eq->page_array), GFP_KERNEL);
if (!eq->page_array)
return -ENOMEM;
for (page_id = 0; page_id < eq->page_num; page_id++) {
ret = sss_alloc_eq_dma_page(eq, page_id);
if (ret != 0)
goto alloc_dma_err;
}
ret = sss_init_eqe_desc(eq);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to init eqe\n");
goto alloc_dma_err;
}
return 0;
alloc_dma_err:
sss_free_eq_dma_page(eq, page_id);
kfree(eq->page_array);
eq->page_array = NULL;
return ret;
}
/* Release all DMA pages of the EQ and the page table itself. */
static void sss_free_eq_page(struct sss_eq *eq)
{
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(eq);
	u16 id;

	for (id = 0; id < eq->page_num; id++)
		sss_dma_free_coherent_align(hwdev->dev_hdl, &eq->page_array[id]);

	kfree(eq->page_array);
	eq->page_array = NULL;
}
/* Pick the smallest power-of-two page size that lets the whole EQ fit in
 * at most SSS_GET_EQ_MAX_PAGES(eq) pages, never going below
 * SSS_MIN_EQ_PAGE_SIZE.
 */
static inline u32 sss_get_eq_page_size(const struct sss_eq *eq)
{
u32 total_size;
u32 count;
total_size = ALIGN((eq->len * eq->entry_size),
SSS_MIN_EQ_PAGE_SIZE);
/* small queue: the minimum page size already suffices */
if (total_size <= (SSS_GET_EQ_MAX_PAGES(eq) * SSS_MIN_EQ_PAGE_SIZE))
return SSS_MIN_EQ_PAGE_SIZE;
count = (u32)(ALIGN((total_size / SSS_GET_EQ_MAX_PAGES(eq)),
SSS_MIN_EQ_PAGE_SIZE) / SSS_MIN_EQ_PAGE_SIZE);
/* round up to nearest power of two */
count = 1U << (u8)fls((int)(count - 1));
return ((u32)SSS_MIN_EQ_PAGE_SIZE) * count;
}
/* Build a unique "name<qid>@pci:<bdf>" IRQ name for this queue and
 * register the EQ interrupt handler on the given vector.
 */
static int sss_request_eq_irq(struct sss_eq *eq, struct sss_irq_desc *entry)
{
struct pci_dev *pdev = SSS_TO_HWDEV(eq)->pcidev_hdl;
snprintf(eq->irq_name, sizeof(eq->irq_name), "%s%u@pci:%s",
eq->name, eq->qid, pci_name(pdev));
return request_irq(entry->irq_id, eq->irq_handler, 0UL, eq->irq_name, eq);
}
/* Reset one EQ in hardware: select it through the indirect-index
 * register, clear its control registers and zero its producer index.
 * The wmb() calls order the indirect-select, control-clear and PI
 * writes; do not reorder these statements.
 */
static void sss_chip_reset_eq(struct sss_eq *eq)
{
struct sss_hwdev *hwdev = eq->hwdev;
struct sss_hwif *hwif = hwdev->hwif;
sss_chip_write_reg(hwif, SSS_EQ_INDIR_ID_ADDR(eq->type), eq->qid);
/* make sure set qid firstly*/
wmb();
if (eq->type == SSS_AEQ)
sss_chip_write_reg(hwif, SSS_CSR_AEQ_CTRL_1_ADDR, 0);
else
sss_chip_set_ceq_attr(hwdev, eq->qid, 0, 0);
/* make sure write ctrl reg secondly */
wmb();
sss_chip_write_reg(hwif, SSS_EQ_PI_REG_ADDR(eq), 0);
}
/* Derive page size and page count for the EQ and validate that the
 * count fits the hardware's per-type page limit.
 */
static int sss_init_eq_page_size(struct sss_eq *eq)
{
eq->page_size = sss_get_eq_page_size(eq);
eq->old_page_size = eq->page_size;
eq->page_num = SSS_GET_EQ_PAGES_NUM(eq, eq->page_size);
if (eq->page_num > SSS_GET_EQ_MAX_PAGES(eq)) {
sdk_err(SSS_TO_HWDEV(eq)->dev_hdl, "Number pages: %u too many pages for eq\n",
eq->page_num);
return -EINVAL;
}
return 0;
}
/*
 * Advance the EQ consumer index by one, toggling the wrap flag when the
 * index rolls over the queue length.  NULL eq is a no-op.
 */
void sss_increase_eq_ci(struct sss_eq *eq)
{
	if (!eq)
		return;

	if (++eq->ci != eq->len)
		return;

	eq->ci = 0;
	eq->wrap = !eq->wrap;
}
/* Fully initialize one event queue: size its pages, allocate them,
 * reset and configure the hardware queue, then request its IRQ.  The
 * MSI-X vector is left masked; the caller enables it when ready.
 *
 * Return: 0 on success; on failure the pages are freed and the error
 * is returned.
 */
int sss_init_eq(struct sss_hwdev *hwdev, struct sss_eq *eq,
struct sss_irq_desc *entry)
{
int ret = 0;
eq->hwdev = hwdev;
eq->irq_desc.irq_id = entry->irq_id;
eq->irq_desc.msix_id = entry->msix_id;
ret = sss_init_eq_page_size(eq);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to init eq params\n");
return ret;
}
ret = sss_alloc_eq_page(eq);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to alloc eq page\n");
return ret;
}
sss_chip_reset_eq(eq);
ret = sss_chip_init_eq_attr(eq);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to init eq attr\n");
goto out;
}
ret = sss_request_eq_irq(eq, entry);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to request eq irq, err: %d\n", ret);
goto out;
}
/* keep the vector masked until the owner explicitly enables it */
sss_chip_set_msix_state(hwdev, SSS_EQ_IRQ_ID(eq), SSS_MSIX_DISABLE);
return 0;
out:
sss_free_eq_page(eq);
return ret;
}
/* Tear down one event queue: mask and release its IRQ, cancel pending
 * bottom-half work, clear the hardware queue config, sync the CI with
 * the chip's PI and free the pages.  Statement order matters (IRQ off
 * before work/tasklet cancel, barrier between indirect-select and
 * control writes).
 */
void sss_deinit_eq(struct sss_eq *eq)
{
struct sss_irq_desc *irq = &eq->irq_desc;
sss_chip_set_msix_state(SSS_TO_HWDEV(eq), SSS_EQ_IRQ_ID(eq), SSS_MSIX_DISABLE);
synchronize_irq(irq->irq_id);
free_irq(irq->irq_id, eq);
sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, SSS_EQ_INDIR_ID_ADDR(eq->type), eq->qid);
/* make sure disable msix */
wmb();
/* AEQs use a workqueue bottom half, CEQs a tasklet */
if (eq->type == SSS_AEQ) {
cancel_work_sync(&eq->aeq_work);
sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, SSS_CSR_AEQ_CTRL_1_ADDR, 0);
} else {
tasklet_kill(&eq->ceq_tasklet);
sss_chip_set_ceq_attr(SSS_TO_HWDEV(eq), eq->qid, 0, 0);
}
/* adopt the chip's producer index as our CI, then de-arm the queue */
eq->ci = sss_chip_read_reg(SSS_TO_HWDEV(eq)->hwif, SSS_EQ_PI_REG_ADDR(eq));
sss_chip_set_eq_ci(eq, SSS_EQ_NOT_ARMED);
sss_free_eq_page(eq);
}
/* Fill an IRQ config with the default EQ interrupt-coalescing settings. */
void sss_init_eq_intr_info(struct sss_irq_cfg *intr_info)
{
intr_info->coalesc_intr_set = SSS_EQ_INTR_COALESC;
intr_info->coalesc_timer = SSS_EQ_INTR_COALESC_TIMER_CFG;
intr_info->resend_timer = SSS_EQ_INTR_RESEND_TIMER_CFG;
}

View File

@ -0,0 +1,91 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWIF_EQ_H
#define SSS_HWIF_EQ_H
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_csr.h"
#define SSS_EQ_UPDATE_CI_STEP 64
#define SSS_TASK_PROCESS_EQE_LIMIT 1024
#define SSS_MIN_EQ_PAGE_SIZE 0x1000 /* min eq page size 4K Bytes */
#define SSS_MAX_EQ_PAGE_SIZE 0x400000 /* max eq page size 4M Bytes */
#define SSS_EQ_USLEEP_LOW_LIMIT 900
#define SSS_EQ_USLEEP_HIG_LIMIT 1000
#define SSS_EQ_IRQ_ID(eq) ((eq)->irq_desc.msix_id)
#define SSS_GET_EQ_ELEM(eq, id) \
(((u8 *)(eq)->page_array[(id) / (eq)->num_entry_per_pg].align_vaddr) + \
(u32)(((id) & ((eq)->num_entry_per_pg - 1)) * (eq)->entry_size))
#define SSS_EQ_VALID_SHIFT 31
#define SSS_EQ_WRAPPED(eq) ((u32)(eq)->wrap << SSS_EQ_VALID_SHIFT)
#define SSS_AEQ_MAX_PAGE 4
#define SSS_CEQ_MAX_PAGE 8
#define SSS_AEQE_SIZE 64
#define SSS_CEQE_SIZE 4
#define SSS_EQ_CI_REG_ADDR(eq) \
(((eq)->type == SSS_AEQ) ? \
SSS_CSR_AEQ_CI_ADDR : SSS_CSR_CEQ_CI_ADDR)
#define SSS_EQ_PI_REG_ADDR(eq) \
(((eq)->type == SSS_AEQ) ? \
SSS_CSR_AEQ_PI_ADDR : SSS_CSR_CEQ_PI_ADDR)
#define SSS_EQ_MSIX_RESEND_TIMER_CLEAR 1
#define SSS_EQ_ELEM_DESC_TYPE_SHIFT 0
#define SSS_EQ_ELEM_DESC_SRC_SHIFT 7
#define SSS_EQ_ELEM_DESC_SIZE_SHIFT 8
#define SSS_EQ_ELEM_DESC_WRAPPED_SHIFT 31
#define SSS_EQ_ELEM_DESC_TYPE_MASK 0x7FU
#define SSS_EQ_ELEM_DESC_SRC_MASK 0x1U
#define SSS_EQ_ELEM_DESC_SIZE_MASK 0xFFU
#define SSS_EQ_ELEM_DESC_WRAPPED_MASK 0x1U
#define SSS_GET_EQE_DESC(val, member) \
(((val) >> SSS_EQ_ELEM_DESC_##member##_SHIFT) & \
SSS_EQ_ELEM_DESC_##member##_MASK)
#define SSS_PAGE_IN_4K(page_size) ((page_size) >> 12)
#define SSS_SET_EQ_HW_PAGE_SIZE(eq) ((u32)ilog2(SSS_PAGE_IN_4K((eq)->page_size)))
enum sss_eq_intr_mode {
SSS_INTR_MODE_ARMED,
SSS_INTR_MODE_ALWAY,
};
enum sss_eq_ci_arm_state {
SSS_EQ_NOT_ARMED,
SSS_EQ_ARMED,
};
#define SSS_EQ_ARM_STATE(unfinish) \
((unfinish) ? SSS_EQ_NOT_ARMED : SSS_EQ_ARMED)
#define SSS_EQ_INTR_COALESC 1
#define SSS_EQ_INTR_COALESC_TIMER_CFG 0xFF
#define SSS_EQ_INTR_RESEND_TIMER_CFG 7
void sss_increase_eq_ci(struct sss_eq *eq);
int sss_init_eq(struct sss_hwdev *hwdev, struct sss_eq *eq,
struct sss_irq_desc *entry);
void sss_deinit_eq(struct sss_eq *eq);
void sss_chip_set_eq_ci(struct sss_eq *eq, u32 arm_state);
void sss_init_eq_intr_info(struct sss_irq_cfg *intr_info);
#endif

View File

@ -0,0 +1,147 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/types.h>
#include <linux/module.h>
#include "sss_kernel.h"
#include "sss_hw_irq.h"
#include "sss_csr.h"
#include "sss_hwdev.h"
#include "sss_hwif_api.h"
/* Allocate one doorbell page and return its mapped address in *db_base.
 *
 * Return: 0 on success, -EINVAL on NULL arguments, -EFAULT when no
 * doorbell id is available.
 */
int sss_alloc_db_addr(void *hwdev, void __iomem **db_base)
{
struct sss_hwif *hwif = NULL;
u32 id = 0;
int ret;
if (!hwdev || !db_base)
return -EINVAL;
hwif = SSS_TO_HWIF(hwdev);
ret = sss_alloc_db_id(hwif, &id);
if (ret != 0)
return -EFAULT;
/* each doorbell id owns one SSS_DB_PAGE_SIZE page in the BAR */
*db_base = hwif->db_base_vaddr + id * SSS_DB_PAGE_SIZE;
return 0;
}
EXPORT_SYMBOL(sss_alloc_db_addr);
/* Release a doorbell page previously obtained via sss_alloc_db_addr().
 * NULL arguments are ignored.
 */
void sss_free_db_addr(void *hwdev, const void __iomem *db_base)
{
struct sss_hwif *hwif = NULL;
u32 id;
if (!hwdev || !db_base)
return;
hwif = SSS_TO_HWIF(hwdev);
/* recover the doorbell id from the page offset inside the BAR */
id = SSS_DB_ID(db_base, hwif->db_base_vaddr);
sss_free_db_id(hwif, id);
}
EXPORT_SYMBOL(sss_free_db_addr);
/*
 * Set or clear the auto-mask attribute of one MSI-X vector through the
 * indirect MSI clear/write register.  NULL hwdev is a no-op.
 */
void sss_chip_set_msix_auto_mask(void *hwdev, u16 msix_id,
				 enum sss_msix_auto_mask flag)
{
	u32 cmd;

	if (!hwdev)
		return;

	if (flag == SSS_CLR_MSIX_AUTO_MASK)
		cmd = SSS_SET_MSI_CLR_INDIR(1, AUTO_MSK_CLR);
	else
		cmd = SSS_SET_MSI_CLR_INDIR(1, AUTO_MSK_SET);

	cmd |= SSS_SET_MSI_CLR_INDIR(msix_id, SIMPLE_INDIR_ID);

	sss_chip_write_reg(SSS_TO_HWIF(hwdev), SSS_CSR_FUNC_MSI_CLR_WR_ADDR, cmd);
}
EXPORT_SYMBOL(sss_chip_set_msix_auto_mask);
/*
 * Mask or unmask one MSI-X vector through the indirect MSI clear/write
 * register.  NULL hwdev is a no-op.
 */
void sss_chip_set_msix_state(void *hwdev, u16 msix_id,
			     enum sss_msix_state flag)
{
	u32 cmd;

	if (!hwdev)
		return;

	if (flag == SSS_MSIX_ENABLE)
		cmd = SSS_SET_MSI_CLR_INDIR(1, INT_MSK_CLR);
	else
		cmd = SSS_SET_MSI_CLR_INDIR(1, INT_MSK_SET);

	cmd |= SSS_SET_MSI_CLR_INDIR(msix_id, SIMPLE_INDIR_ID);

	sss_chip_write_reg(SSS_TO_HWIF(hwdev), SSS_CSR_FUNC_MSI_CLR_WR_ADDR, cmd);
}
EXPORT_SYMBOL(sss_chip_set_msix_state);
/* Exported hwif attribute getters.  Each returns 0 for a NULL hwdev. */

/* Global (chip-wide) function id of this PCI function. */
u16 sss_get_global_func_id(void *hwdev)
{
if (!hwdev)
return 0;
return SSS_GET_HWIF_GLOBAL_ID(SSS_TO_HWIF(hwdev));
}
EXPORT_SYMBOL(sss_get_global_func_id);
/* Id of the parent PF when this function is a VF. */
u8 sss_get_pf_id_of_vf(void *hwdev)
{
if (!hwdev)
return 0;
return SSS_GET_HWIF_PF_ID(SSS_TO_HWIF(hwdev));
}
EXPORT_SYMBOL(sss_get_pf_id_of_vf);
/* PCIe interface (host) id this function is attached to. */
u8 sss_get_pcie_itf_id(void *hwdev)
{
if (!hwdev)
return 0;
return SSS_GET_HWIF_PCI_INTF_ID(SSS_TO_HWIF(hwdev));
}
EXPORT_SYMBOL(sss_get_pcie_itf_id);
/* Function type: PF, VF or PPF. */
enum sss_func_type sss_get_func_type(void *hwdev)
{
if (!hwdev)
return 0;
return SSS_GET_FUNC_TYPE((struct sss_hwdev *)hwdev);
}
EXPORT_SYMBOL(sss_get_func_type);
/* NOTE(review): return type enum sss_func_type looks copy-pasted from
 * sss_get_func_type() above — SSS_GET_FUNC_ID presumably yields a
 * numeric function id, not an enum; confirm against sss_hwdev.h.
 */
enum sss_func_type sss_get_func_id(void *hwdev)
{
if (!hwdev)
return 0;
return SSS_GET_FUNC_ID((struct sss_hwdev *)hwdev);
}
EXPORT_SYMBOL(sss_get_func_id);
/* Global VF id offset of this PF's first VF. */
u16 sss_get_glb_pf_vf_offset(void *hwdev)
{
if (!hwdev)
return 0;
return SSS_GET_HWIF_GLOBAL_VF_OFFSET(SSS_TO_HWIF(hwdev));
}
EXPORT_SYMBOL(sss_get_glb_pf_vf_offset);
/* Id of the PPF (parent physical function) on this host. */
u8 sss_get_ppf_id(void *hwdev)
{
if (!hwdev)
return 0;
return SSS_GET_HWIF_PPF_ID(SSS_TO_HWIF(hwdev));
}
EXPORT_SYMBOL(sss_get_ppf_id);

View File

@ -0,0 +1,413 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/module.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_csr.h"
#include "sss_common.h"
#include "sss_hwdev.h"
#include "sss_hwif_init.h"
#include "sss_hwif_api.h"
#define SSS_WAIT_CHIP_READY_TIMEOUT 10000
#define SSS_WAIT_DB_READY_TIMEOUT 60000
#define SSS_MAX_MSIX_ENTRY 2048
#define SSS_AF0_FUNC_GLOBAL_ID_SHIFT 0
#define SSS_AF0_PF_ID_SHIFT 12
#define SSS_AF0_PCI_INTF_ID_SHIFT 17
#define SSS_AF0_VF_IN_PF_SHIFT 20
#define SSS_AF0_FUNC_TYPE_SHIFT 28
#define SSS_AF0_FUNC_GLOBAL_ID_MASK 0xFFF
#define SSS_AF0_PF_ID_MASK 0x1F
#define SSS_AF0_PCI_INTF_ID_MASK 0x7
#define SSS_AF0_VF_IN_PF_MASK 0xFF
#define SSS_AF0_FUNC_TYPE_MASK 0x1
#define SSS_GET_AF0(val, member) \
(((val) >> SSS_AF0_##member##_SHIFT) & SSS_AF0_##member##_MASK)
#define SSS_AF2_CEQ_PER_FUNC_SHIFT 0
#define SSS_AF2_DMA_ATTR_PER_FUNC_SHIFT 9
#define SSS_AF2_IRQ_PER_FUNC_SHIFT 16
#define SSS_AF2_CEQ_PER_FUNC_MASK 0x1FF
#define SSS_AF2_DMA_ATTR_PER_FUNC_MASK 0x7
#define SSS_AF2_IRQ_PER_FUNC_MASK 0x7FF
#define SSS_GET_AF2(val, member) \
(((val) >> SSS_AF2_##member##_SHIFT) & SSS_AF2_##member##_MASK)
#define SSS_AF3_GLOBAL_VF_ID_OF_NXT_PF_SHIFT 0
#define SSS_AF3_GLOBAL_VF_ID_OF_PF_SHIFT 16
#define SSS_AF3_GLOBAL_VF_ID_OF_NXT_PF_MASK 0xFFF
#define SSS_AF3_GLOBAL_VF_ID_OF_PF_MASK 0xFFF
#define SSS_GET_AF3(val, member) \
(((val) >> SSS_AF3_##member##_SHIFT) & SSS_AF3_##member##_MASK)
#define SSS_AF5_OUTBOUND_CTRL_SHIFT 0
#define SSS_AF5_OUTBOUND_CTRL_MASK 0x1
#define SSS_GET_AF5(val, member) \
(((val) >> SSS_AF5_##member##_SHIFT) & SSS_AF5_##member##_MASK)
#define SSS_SET_AF5(val, member) \
(((val) & SSS_AF5_##member##_MASK) << SSS_AF5_##member##_SHIFT)
#define SSS_CLEAR_AF5(val, member) \
((val) & (~(SSS_AF5_##member##_MASK << SSS_AF5_##member##_SHIFT)))
#define SSS_MPF_ELECTION_ID_SHIFT 0
#define SSS_MPF_ELECTION_ID_MASK 0x1F
#define SSS_SET_MPF(val, member) \
(((val) & SSS_MPF_ELECTION_##member##_MASK) << \
SSS_MPF_ELECTION_##member##_SHIFT)
#define SSS_GET_MPF(val, member) \
(((val) >> SSS_MPF_ELECTION_##member##_SHIFT) & \
SSS_MPF_ELECTION_##member##_MASK)
#define SSS_CLEAR_MPF(val, member) \
((val) & (~(SSS_MPF_ELECTION_##member##_MASK << \
SSS_MPF_ELECTION_##member##_SHIFT)))
/*
 * Poll callback for sss_check_handler_timeout(): map the PCIe link
 * status to the generic process result (down -> error, up -> done,
 * anything else -> keep polling).
 */
static enum sss_process_ret sss_check_pcie_link_handle(void *data)
{
	switch (sss_chip_get_pcie_link_status(data)) {
	case SSS_PCIE_LINK_DOWN:
		return SSS_PROCESS_ERR;
	case SSS_PCIE_LINK_UP:
		return SSS_PROCESS_OK;
	default:
		return SSS_PROCESS_DOING;
	}
}
/* Wait up to SSS_WAIT_CHIP_READY_TIMEOUT ms for the PCIe link to come
 * up, logging on timeout.  Returns 0 on success.
 */
static int sss_wait_pcie_link_up(struct sss_hwdev *hwdev)
{
int ret;
ret = sss_check_handler_timeout(hwdev, sss_check_pcie_link_handle,
SSS_WAIT_CHIP_READY_TIMEOUT, USEC_PER_MSEC);
if (ret == -ETIMEDOUT)
sdk_err(hwdev->dev_hdl, "Wait for chip ready timeout\n");
return ret;
}
/* Read HW attribute register 0 and cache the global function id, PF id,
 * PCI interface id and function type in the hwif.  An all-ones read
 * (SSS_PCIE_LINK_DOWN pattern) means the device is gone: -EFAULT.
 */
static int sss_chip_get_func_attr0(struct sss_hwif *hwif)
{
u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR0_ADDR);
if (attr == SSS_PCIE_LINK_DOWN)
return -EFAULT;
SSS_SET_HWIF_GLOBAL_ID(hwif, SSS_GET_AF0(attr, FUNC_GLOBAL_ID));
SSS_SET_HWIF_PF_ID(hwif, SSS_GET_AF0(attr, PF_ID));
SSS_SET_HWIF_PCI_INTF_ID(hwif, SSS_GET_AF0(attr, PCI_INTF_ID));
SSS_SET_HWIF_FUNC_TYPE(hwif, SSS_GET_AF0(attr, FUNC_TYPE));
return 0;
}
/* Read HW attribute register 1: PPF id and AEQ count (register stores
 * the count as a power-of-two exponent, hence BIT()).
 */
static int sss_chip_get_func_attr1(struct sss_hwif *hwif)
{
u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR1_ADDR);
if (attr == SSS_PCIE_LINK_DOWN)
return -EFAULT;
SSS_SET_HWIF_PPF_ID(hwif, SSS_GET_AF1(attr, PPF_ID));
SSS_SET_HWIF_AEQ_NUM(hwif, BIT(SSS_GET_AF1(attr, AEQ_PER_FUNC)));
return 0;
}
/* Read HW attribute register 2: CEQ count, IRQ count (clamped to
 * SSS_MAX_MSIX_ENTRY) and DMA-attr count (power-of-two exponent).
 */
static int sss_chip_get_func_attr2(struct sss_hwif *hwif)
{
u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR2_ADDR);
if (attr == SSS_PCIE_LINK_DOWN)
return -EFAULT;
SSS_SET_HWIF_CEQ_NUM(hwif, (u8)SSS_GET_AF2(attr, CEQ_PER_FUNC));
SSS_SET_HWIF_IRQ_NUM(hwif, SSS_GET_AF2(attr, IRQ_PER_FUNC));
if (SSS_GET_HWIF_IRQ_NUM(hwif) > SSS_MAX_MSIX_ENTRY)
SSS_SET_HWIF_IRQ_NUM(hwif, SSS_MAX_MSIX_ENTRY);
SSS_SET_HWIF_DMA_ATTR_NUM(hwif, BIT(SSS_GET_AF2(attr, DMA_ATTR_PER_FUNC)));
return 0;
}
/* Read HW attribute register 3: global VF id offset of this PF. */
static int sss_chip_get_func_attr3(struct sss_hwif *hwif)
{
u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR3_ADDR);
if (attr == SSS_PCIE_LINK_DOWN)
return -EFAULT;
SSS_SET_HWIF_GLOBAL_VF_OFFSET(hwif, SSS_GET_AF3(attr, GLOBAL_VF_ID_OF_PF));
return 0;
}
/* Read HW attribute register 6: maximum SQ count and the MSI-X
 * flexible-enable flag.
 */
static int sss_chip_get_func_attr6(struct sss_hwif *hwif)
{
u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR6_ADDR);
if (attr == SSS_PCIE_LINK_DOWN)
return -EFAULT;
SSS_SET_HWIF_SQ_NUM(hwif, SSS_GET_AF6(attr, FUNC_MAX_SQ));
SSS_SET_HWIF_MSIX_EN(hwif, SSS_GET_AF6(attr, MSIX_FLEX_EN));
return 0;
}
static int sss_hwif_init_func_attr(struct sss_hwif *hwif)
{
int ret;
ret = sss_chip_get_func_attr0(hwif);
if (ret != 0)
return ret;
ret = sss_chip_get_func_attr1(hwif);
if (ret != 0)
return ret;
ret = sss_chip_get_func_attr2(hwif);
if (ret != 0)
return ret;
ret = sss_chip_get_func_attr3(hwif);
if (ret != 0)
return ret;
ret = sss_chip_get_func_attr6(hwif);
if (ret != 0)
return ret;
return 0;
}
/* Take part in the PPF election: write our global id into the election
 * register, read back the winner, and if we won, promote this
 * function's type to PPF.
 */
static void sss_chip_init_ppf(struct sss_hwif *hwif)
{
u32 val;
val = sss_chip_read_reg(hwif, SSS_CSR_PPF_ELECT_ADDR);
val = SSS_CLEAR_PPF(val, ID);
val |= SSS_SET_PPF(SSS_GET_HWIF_GLOBAL_ID(hwif), ID);
sss_chip_write_reg(hwif, SSS_CSR_PPF_ELECT_ADDR, val);
/* Check PPF */
val = sss_chip_read_reg(hwif, SSS_CSR_PPF_ELECT_ADDR);
SSS_SET_HWIF_PPF_ID(hwif, SSS_GET_PPF(val, ID));
if (SSS_GET_HWIF_PPF_ID(hwif) == SSS_GET_HWIF_GLOBAL_ID(hwif))
SSS_SET_HWIF_FUNC_TYPE(hwif, SSS_FUNC_TYPE_PPF);
}
/* Cache the elected master-PF id from the global MPF election register. */
static void sss_chip_get_mpf(struct sss_hwif *hwif)
{
	u32 elect = sss_chip_read_reg(hwif, SSS_CSR_GLOBAL_MPF_ELECT_ADDR);

	SSS_SET_HWIF_MPF_ID(hwif, SSS_GET_MPF(elect, ID));
}
/* Vote this function's global id into the master-PF election register. */
static void sss_chip_init_mpf(struct sss_hwif *hwif)
{
	u32 elect = sss_chip_read_reg(hwif, SSS_CSR_GLOBAL_MPF_ELECT_ADDR);

	elect = SSS_CLEAR_MPF(elect, ID) |
		SSS_SET_MPF(SSS_GET_HWIF_GLOBAL_ID(hwif), ID);
	sss_chip_write_reg(hwif, SSS_CSR_GLOBAL_MPF_ELECT_ADDR, elect);
}
/* Create the doorbell-id allocator: one bit per doorbell page in the
 * db/dwqe BAR area, capped at SSS_DB_MAX_AREAS.
 */
static int sss_hwif_alloc_db_pool(struct sss_hwif *hwif)
{
struct sss_db_pool *pool = &hwif->db_pool;
u32 bit_size;
bit_size = (hwif->db_dwqe_len > SSS_DB_DWQE_SIZE) ? SSS_DB_MAX_AREAS :
((u32)(hwif->db_dwqe_len / SSS_DB_PAGE_SIZE));
pool->bitmap = bitmap_zalloc(bit_size, GFP_KERNEL);
if (!pool->bitmap) {
pr_err("Fail to allocate db area.\n");
return -ENOMEM;
}
pool->bit_size = bit_size;
spin_lock_init(&pool->id_lock);
return 0;
}
/*
 * Release the doorbell-id bitmap.  It was allocated with bitmap_zalloc()
 * in sss_hwif_alloc_db_pool(), so pair it with bitmap_free() rather
 * than a bare kfree().
 */
static void sss_hwif_free_db_pool(struct sss_db_pool *pool)
{
	bitmap_free(pool->bitmap);
}
/* Mask every MSI-X vector owned by this function. */
static void sss_chip_disable_all_msix(struct sss_hwdev *hwdev)
{
	const u16 irq_num = SSS_GET_HWIF_IRQ_NUM(hwdev->hwif);
	u16 vec;

	for (vec = 0; vec < irq_num; vec++)
		sss_chip_set_msix_state(hwdev, vec, SSS_MSIX_DISABLE);
}
/* Poll callback: the device is ready once both the doorbell control
 * (attr4) and outbound control (attr5) report enabled.
 */
static enum sss_process_ret sss_chip_check_db_ready(void *data)
{
int outbound_status;
int db_status;
struct sss_hwif *hwif = data;
u32 db_attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR4_ADDR);
u32 outband_attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR5_ADDR);
db_status = SSS_GET_AF4(db_attr, DOORBELL_CTRL);
outbound_status = SSS_GET_AF5(outband_attr, OUTBOUND_CTRL);
if (db_status == DB_ENABLE && outbound_status == OUTBOUND_ENABLE)
return SSS_PROCESS_OK;
return SSS_PROCESS_DOING;
}
/* Wait up to SSS_WAIT_DB_READY_TIMEOUT ms for doorbell and outbound
 * paths to become enabled.
 */
static int sss_wait_db_ready(struct sss_hwif *hwif)
{
return sss_check_handler_timeout(hwif, sss_chip_check_db_ready,
SSS_WAIT_DB_READY_TIMEOUT, USEC_PER_MSEC);
}
/* Copy the mapped BAR addresses from the PCI adapter into the hwif. */
static void sss_hwif_init_bar_base(struct sss_pci_adapter *adapter)
{
struct sss_hwif *hwif = SSS_TO_HWIF(adapter->hwdev);
hwif->db_dwqe_len = adapter->db_dwqe_len;
hwif->db_base_vaddr = adapter->db_reg_bar;
hwif->db_base_paddr = adapter->db_base_paddr;
hwif->mgmt_reg_base = adapter->mgmt_reg_bar;
/* NOTE(review): when no mgmt BAR exists (presumably the VF case) the
 * config registers are reached through cfg_reg_bar at
 * SSS_VF_CFG_REG_OFFSET — confirm this ternary against the VF BAR map
 */
hwif->cfg_reg_base = (adapter->mgmt_reg_bar) ?
adapter->cfg_reg_bar :
((u8 *)adapter->cfg_reg_bar + SSS_VF_CFG_REG_OFFSET);
}
/* Wait for the chip to become usable: PCIe link up first, then the
 * doorbell/outbound paths enabled.  Logs the raw attr registers when
 * the doorbell never becomes ready.
 */
static int sss_hwif_wait_chip_ready(struct sss_hwdev *hwdev)
{
int ret;
u32 db_attr;
u32 outband_attr;
ret = sss_wait_pcie_link_up(hwdev);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Pcie is not link up\n");
return ret;
}
ret = sss_wait_db_ready(hwdev->hwif);
if (ret != 0) {
db_attr = sss_chip_read_reg(hwdev->hwif, SSS_CSR_HW_ATTR4_ADDR);
outband_attr = sss_chip_read_reg(hwdev->hwif, SSS_CSR_HW_ATTR5_ADDR);
sdk_err(hwdev->dev_hdl, "Hw doorbell is disabled, db 0x%x outbound 0x%x\n",
db_attr, outband_attr);
return ret;
}
return 0;
}
/* Per-function chip init: PFs run the PPF election (and the PPF runs
 * the MPF election), every function learns the current MPF, masks all
 * MSI-X vectors and reports the INIT status to the chip.
 */
static void sss_hwif_init_pf(struct sss_hwdev *hwdev)
{
struct sss_hwif *hwif = hwdev->hwif;
if (!SSS_IS_VF(hwdev)) {
sss_chip_init_ppf(hwif);
if (SSS_IS_PPF(hwdev))
sss_chip_init_mpf(hwif);
sss_chip_get_mpf(hwif);
}
sss_chip_disable_all_msix(hwdev);
sss_chip_set_pf_status(hwif, SSS_PF_STATUS_INIT);
sdk_info(hwdev->dev_hdl,
"Global_func_id: %u, func_type: %d, host_id: %u, ppf: %u, mpf: %u\n",
SSS_GET_HWIF_GLOBAL_ID(hwif), SSS_GET_HWIF_FUNC_TYPE(hwif),
SSS_GET_HWIF_PCI_INTF_ID(hwif), SSS_GET_HWIF_PPF_ID(hwif),
SSS_GET_HWIF_MPF_ID(hwif));
}
/* Allocate and initialize the hardware interface layer: map BAR bases,
 * build the doorbell pool, wait for the chip, read its attributes and
 * run per-function init.  On failure dumps chip error info and unwinds,
 * leaving hwdev->hwif NULL.
 */
int sss_hwif_init(struct sss_pci_adapter *adapter)
{
struct sss_hwdev *hwdev = adapter->hwdev;
struct sss_hwif *hwif = NULL;
int ret;
hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
if (!hwif)
return -ENOMEM;
hwif->pdev = hwdev->pcidev_hdl;
hwdev->hwif = hwif;
sss_hwif_init_bar_base(adapter);
ret = sss_hwif_alloc_db_pool(hwif);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail to init db pool.\n");
goto alloc_db_pool_err;
}
ret = sss_hwif_wait_chip_ready(hwdev);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Chip is not ready\n");
goto wait_chip_ready_err;
}
ret = sss_hwif_init_func_attr(hwif);
if (ret != 0) {
sdk_err(hwdev->dev_hdl, "Fail init hwif attr\n");
goto wait_chip_ready_err;
}
sss_hwif_init_pf(hwdev);
return 0;
wait_chip_ready_err:
/* capture the chip's error state before tearing down */
sss_dump_chip_err_info(hwdev);
sss_hwif_free_db_pool(&hwif->db_pool);
alloc_db_pool_err:
kfree(hwif);
hwdev->hwif = NULL;
return ret;
}
void sss_hwif_deinit(struct sss_hwdev *hwdev)
{
sss_hwif_free_db_pool(&hwdev->hwif->db_pool);
kfree(hwdev->hwif);
hwdev->hwif = NULL;
}

View File

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWIF_INIT_H
#define SSS_HWIF_INIT_H
#include "sss_hwdev.h"
#include "sss_adapter.h"
int sss_hwif_init(struct sss_pci_adapter *adapter);
void sss_hwif_deinit(struct sss_hwdev *hwdev);
#endif

View File

@ -0,0 +1,125 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include "sss_kernel.h"
#include "sss_hwdev.h"
#include "sss_hw_svc_cap.h"
#define SSS_GET_NEED_IRQ_NUM(hwif, intr_num) \
(SSS_GET_HWIF_MSIX_EN(hwif) ? (SSS_GET_HWIF_AEQ_NUM(hwif) + \
SSS_GET_HWIF_CEQ_NUM(hwif) + (hwif)->attr.sq_num) : (intr_num))
#define SSS_MIN_VECTOR 2
/* Size and allocate the management IRQ table.  With flexible MSI-X the
 * needed count is aeq + ceq + sq vectors, clamped to the total the chip
 * grants; the table itself is sized for the full total.
 *
 * Return: 0 on success, -EFAULT when the chip grants no vectors,
 * -ENOMEM on allocation failure.
 */
static int sss_alloc_irq_info(struct sss_hwdev *hwdev)
{
u16 total_num = SSS_GET_HWIF_IRQ_NUM(hwdev->hwif);
u16 need_num = SSS_GET_NEED_IRQ_NUM(hwdev->hwif, total_num);
struct sss_mgmt_info *mgmt_info = hwdev->mgmt_info;
struct sss_irq_info *irq_info = &mgmt_info->irq_info;
if (total_num == 0) {
sdk_err(hwdev->dev_hdl, "Mgmt irq info: intr total_num = 0, msix_flex_en %d\n",
SSS_GET_HWIF_MSIX_EN(hwdev->hwif));
return -EFAULT;
}
if (need_num > total_num) {
sdk_warn(hwdev->dev_hdl, "Mgmt irq info: intr total_num %d < need_num %d, msix_flex_en %d\n",
total_num, need_num, SSS_GET_HWIF_MSIX_EN(hwdev->hwif));
need_num = total_num;
}
irq_info->irq = kcalloc(total_num, sizeof(*irq_info->irq), GFP_KERNEL);
if (!irq_info->irq)
return -ENOMEM;
irq_info->max_num = need_num;
return 0;
}
static void sss_free_irq_info(struct sss_hwdev *hwdev)
{
kfree(hwdev->mgmt_info->irq_info.irq);
hwdev->mgmt_info->irq_info.irq = NULL;
}
/* Enable MSI-X for the device and populate the management irq pool.
 *
 * Steps: allocate the irq table, build a temporary msix_entry array,
 * enable between SSS_MIN_VECTOR and max_num vectors via
 * pci_enable_msix_range(), then record each granted vector in the pool
 * as free and untyped.  The msix_entry array is only needed during
 * setup and is freed before returning.
 *
 * Return: 0 on success, negative errno on failure (irq table is freed
 * on every error path).
 */
int sss_init_irq_info(struct sss_hwdev *hwdev)
{
	u16 i = 0;
	u16 irq_num;
	int enable_irq_num;
	int ret;
	struct sss_mgmt_info *mgmt_info = hwdev->mgmt_info;
	struct sss_irq *irq = NULL;
	struct msix_entry *entry = NULL;

	ret = sss_alloc_irq_info(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to alloc irq info, err: %d\n", ret);
		return ret;
	}

	irq_num = mgmt_info->irq_info.max_num;
	entry = kcalloc(irq_num, sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		sss_free_irq_info(hwdev);
		return -ENOMEM;
	}

	for (i = 0; i < irq_num; i++)
		entry[i].entry = i;

	/* May legitimately grant fewer vectors than requested, but never
	 * fewer than SSS_MIN_VECTOR.
	 */
	enable_irq_num = pci_enable_msix_range(hwdev->pcidev_hdl, entry,
					       SSS_MIN_VECTOR, irq_num);
	if (enable_irq_num < 0) {
		kfree(entry);
		sss_free_irq_info(hwdev);
		sdk_err(hwdev->dev_hdl, "Fail to alloc msix entries with threshold 2. enabled_irq: %d\n",
			enable_irq_num);
		return -ENOMEM;
	}

	/* From here on, account only for the vectors actually granted. */
	irq_num = (u16)enable_irq_num;
	mgmt_info->irq_info.total_num = irq_num;
	mgmt_info->irq_info.free_num = irq_num;
	mgmt_info->svc_cap.intr_type = SSS_INTR_TYPE_MSIX;

	irq = mgmt_info->irq_info.irq;
	for (i = 0; i < irq_num; i++) {
		irq[i].desc.msix_id = entry[i].entry;
		irq[i].desc.irq_id = entry[i].vector;
		irq[i].type = SSS_SERVICE_TYPE_MAX;	/* not yet bound to a service */
		irq[i].busy = SSS_CFG_FREE;
	}

	mutex_init(&mgmt_info->irq_info.irq_mutex);

	sdk_info(hwdev->dev_hdl, "Success to request %u msix vector.\n", irq_num);
	kfree(entry);

	return 0;
}
/* Disable device interrupts and free the management irq pool.
 *
 * A free_num != total_num mismatch means some consumer never returned
 * its vector; it is logged but teardown continues regardless.
 */
void sss_deinit_irq_info(struct sss_hwdev *hwdev)
{
	struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap;
	struct sss_irq_info *irq_info = &hwdev->mgmt_info->irq_info;

	if (irq_info->free_num != irq_info->total_num)
		sdk_err(hwdev->dev_hdl, "Fail to reclaim all irq and eq, please check\n");

	/* Disable whichever interrupt mode was enabled at init time. */
	if (svc_cap->intr_type == SSS_INTR_TYPE_MSIX)
		pci_disable_msix(hwdev->pcidev_hdl);
	else if (svc_cap->intr_type == SSS_INTR_TYPE_MSI)
		pci_disable_msi(hwdev->pcidev_hdl);

	sss_free_irq_info(hwdev);
}

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWIF_IRQ_H
#define SSS_HWIF_IRQ_H
#include "sss_hwdev.h"
int sss_init_irq_info(struct sss_hwdev *dev);
void sss_deinit_irq_info(struct sss_hwdev *dev);
#endif

View File

@ -0,0 +1,656 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include "sss_kernel.h"
#include "sss_hw_common.h"
#include "sss_hwdev.h"
#include "sss_hwif_api.h"
#include "sss_hwif_eq.h"
#include "sss_hwif_mbx.h"
#include "sss_hwif_aeq.h"
#include "sss_csr.h"
#include "sss_common.h"
#define SSS_MBX_INT_DST_AEQN_SHIFT 10
#define SSS_MBX_INT_SRC_RESP_AEQN_SHIFT 12
#define SSS_MBX_INT_STAT_DMA_SHIFT 14
/* The size of data to be send (unit of 4 bytes) */
#define SSS_MBX_INT_TX_SIZE_SHIFT 20
/* SO_RO(strong order, relax order) */
#define SSS_MBX_INT_STAT_DMA_SO_RO_SHIFT 25
#define SSS_MBX_INT_WB_EN_SHIFT 28
#define SSS_MBX_INT_DST_AEQN_MASK 0x3
#define SSS_MBX_INT_SRC_RESP_AEQN_MASK 0x3
#define SSS_MBX_INT_STAT_DMA_MASK 0x3F
#define SSS_MBX_INT_TX_SIZE_MASK 0x1F
#define SSS_MBX_INT_STAT_DMA_SO_RO_MASK 0x3
#define SSS_MBX_INT_WB_EN_MASK 0x1
#define SSS_SET_MBX_INT(val, field) \
(((val) & SSS_MBX_INT_##field##_MASK) << \
SSS_MBX_INT_##field##_SHIFT)
enum sss_mbx_tx_status {
SSS_MBX_TX_NOT_COMPLETE = 1,
};
#define SSS_MBX_CTRL_TRIGGER_AEQE_SHIFT 0
#define SSS_MBX_CTRL_TX_STATUS_SHIFT 1
#define SSS_MBX_CTRL_DST_FUNC_SHIFT 16
#define SSS_MBX_CTRL_TRIGGER_AEQE_MASK 0x1
#define SSS_MBX_CTRL_TX_STATUS_MASK 0x1
#define SSS_MBX_CTRL_DST_FUNC_MASK 0x1FFF
#define SSS_SET_MBX_CTRL(val, field) \
(((val) & SSS_MBX_CTRL_##field##_MASK) << \
SSS_MBX_CTRL_##field##_SHIFT)
#define SSS_MBX_SEGLEN_MASK \
SSS_SET_MSG_HEADER(SSS_MSG_HEADER_SEG_LEN_MASK, SEG_LEN)
#define SSS_MBX_MSG_POLL_TIMEOUT_MS 8000
#define SSS_MBX_COMPLETE_WAIT_TIME_MS 40000U
#define SSS_SEQ_ID_START_VAL 0
/* mbx write back status is 16B, only first 4B is used */
#define SSS_MBX_WB_STATUS_ERRCODE_MASK 0xFFFF
#define SSS_MBX_WB_STATUS_MASK 0xFF
#define SSS_MBX_WB_ERRCODE_MASK 0xFF00
#define SSS_MBX_WB_STATUS_FINISHED_SUCCESS 0xFF
#define SSS_MBX_WB_STATUS_NOT_FINISHED 0x00
#define SSS_MBX_STATUS_FINISHED(wb) \
(((wb) & SSS_MBX_WB_STATUS_MASK) != SSS_MBX_WB_STATUS_NOT_FINISHED)
#define SSS_MBX_STATUS_SUCCESS(wb) \
(((wb) & SSS_MBX_WB_STATUS_MASK) == SSS_MBX_WB_STATUS_FINISHED_SUCCESS)
#define SSS_MBX_STATUS_ERRCODE(wb) \
((wb) & SSS_MBX_WB_ERRCODE_MASK)
#define SSS_NO_DMA_ATTR 0
#define SSS_MBX_MSG_ID_MASK 0xF
#define SSS_MBX_MSG_ID(mbx) ((mbx)->send_msg_id)
#define SSS_INCREASE_MBX_MSG_ID(mbx) \
((mbx)->send_msg_id = ((mbx)->send_msg_id + 1) & SSS_MBX_MSG_ID_MASK)
#define SSS_MBX_MSG_CHN_STOP(mbx) \
((((mbx)->lock_channel_en) && \
test_bit((mbx)->cur_msg_channel, &(mbx)->channel_stop)) ? true : false)
#define SSS_MBX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a
#define SSS_MBX_XOR_DATA_ALIGN 4
#define SSS_MQ_ID_MASK(mq, id) ((id) & ((mq)->depth - 1))
#define SSS_IS_MSG_QUEUE_FULL(mq) \
(SSS_MQ_ID_MASK(mq, (mq)->pi + 1) == SSS_MQ_ID_MASK(mq, (mq)->ci))
#define SSS_MBX_TRY_LOCK_SLEPP_US 1000
#define SSS_FILL_MSG_HEADER(hwdev, msg_info, msg_len, mod, ack_type, type, direction, cmd) \
(SSS_SET_MSG_HEADER((msg_len), MSG_LEN) | \
SSS_SET_MSG_HEADER((mod), MODULE) | \
SSS_SET_MSG_HEADER(SSS_MBX_SEG_SIZE, SEG_LEN) | \
SSS_SET_MSG_HEADER((ack_type), NO_ACK) | \
SSS_SET_MSG_HEADER((type), DATA_TYPE) | \
SSS_SET_MSG_HEADER(SSS_SEQ_ID_START_VAL, SEQID) | \
SSS_SET_MSG_HEADER(SSS_NOT_LAST_SEG, LAST) | \
SSS_SET_MSG_HEADER((direction), DIRECTION) | \
SSS_SET_MSG_HEADER((cmd), CMD) | \
SSS_SET_MSG_HEADER((msg_info)->msg_id, MSG_ID) | \
SSS_SET_MSG_HEADER((((hwdev)->poll || \
(hwdev)->hwif->attr.aeq_num >= SSS_MGMT_RSP_MSG_AEQ) ? \
SSS_MBX_RSP_MSG_AEQ : SSS_ASYNC_MSG_AEQ), AEQ_ID) | \
SSS_SET_MSG_HEADER(SSS_MSG_SRC_MBX, SOURCE) | \
SSS_SET_MSG_HEADER(!!(msg_info)->state, STATUS) | \
SSS_SET_MSG_HEADER(sss_get_global_func_id(hwdev), SRC_GLB_FUNC_ID))
#define SSS_MBX_SEG_LEN_ALIGN 4
enum sss_msg_aeq_type {
SSS_ASYNC_MSG_AEQ = 0,
/* indicate dest func or mgmt cpu which aeq to response mbx message */
SSS_MBX_RSP_MSG_AEQ = 1,
/* indicate mgmt cpu which aeq to response adm message */
SSS_MGMT_RSP_MSG_AEQ = 2,
};
enum sss_mbx_order_type {
SSS_MBX_STRONG_ORDER,
};
enum sss_mbx_wb_type {
SSS_MBX_WB = 1,
};
enum sss_mbx_aeq_trig_type {
SSS_MBX_NOT_TRIG,
};
/* Inline descriptor carried in a mailbox message when the real payload
 * lives in a host DMA buffer instead of mailbox segments.
 */
struct sss_mbx_dma_msg {
	u32 xor;		/* xor checksum over the DMA payload, seeded with SSS_MBX_DMA_MSG_INIT_XOR_VAL */
	u32 dma_addr_h;		/* upper 32 bits of the payload DMA address */
	u32 dma_addr_l;		/* lower 32 bits of the payload DMA address */
	u32 msg_len;		/* payload length in bytes */
	u64 rsvd;
};
/* Messages from the management CPU all share one dedicated buffer. */
static struct sss_msg_buffer *sss_get_msg_buffer_from_mgmt(struct sss_mbx *mbx)
{
	return &mbx->mgmt_msg;
}
/* On a VF, only messages from our parent PF are tracked; anything else
 * (or an unallocated buffer array) yields NULL.
 */
static struct sss_msg_buffer *sss_get_msg_buffer_from_pf(struct sss_mbx *mbx, u64 src_func_id)
{
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);

	if (mbx->func_msg && src_func_id == sss_get_pf_id_of_vf(hwdev))
		return mbx->func_msg;

	return NULL;
}
/* On a PF, map a VF's global function id to its per-VF message buffer.
 * Out-of-range ids yield NULL.
 */
static struct sss_msg_buffer *sss_get_msg_buffer_from_vf(struct sss_mbx *mbx, u64 src_func_id)
{
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);
	u16 vf_index = (u16)(src_func_id - 1U) - sss_get_glb_pf_vf_offset(hwdev);

	return (vf_index < mbx->num_func_msg) ? &mbx->func_msg[vf_index] : NULL;
}
/* Host-to-host case: find which peer host's PPF sent this message and
 * return its buffer, or NULL when h2h messaging is unsupported, the
 * sender is unknown, or the host buffer array is absent.
 */
static struct sss_msg_buffer *sss_get_msg_buffer_from_ppf(struct sss_mbx *mbx, u64 src_func_id)
{
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);
	u16 host_id;

	if (!mbx->support_h2h_msg)
		return NULL;

	for (host_id = 0; host_id < SSS_MAX_HOST_NUM(hwdev); host_id++) {
		if (src_func_id == sss_chip_get_host_ppf_id(hwdev, (u8)host_id))
			break;
	}

	if (host_id == SSS_MAX_HOST_NUM(hwdev) || !mbx->host_msg)
		return NULL;

	return &mbx->host_msg[host_id];
}
/* Select the message descriptor tracking a conversation with a given
 * source function: mgmt CPU, parent PF (when we are a VF), one of our
 * VFs, or a peer host PPF.
 *
 * Return: the recv descriptor for direct-send messages, the resp
 * descriptor otherwise, or NULL when the source is unknown.
 *
 * Fix: the lookup helpers can return NULL (unknown VF/host, missing
 * buffer array).  The old code still computed &msg_buffer->recv_msg on
 * that NULL — undefined behavior that produces a bogus non-NULL pointer
 * and defeats the callers' `if (!msg_desc)` checks.  Guard explicitly.
 */
struct sss_msg_desc *sss_get_mbx_msg_desc(struct sss_mbx *mbx, u64 src_func_id, u64 direction)
{
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);
	struct sss_msg_buffer *msg_buffer = NULL;

	if (src_func_id == SSS_MGMT_SRC_ID)
		msg_buffer = sss_get_msg_buffer_from_mgmt(mbx);
	else if (SSS_IS_VF(hwdev))
		msg_buffer = sss_get_msg_buffer_from_pf(mbx, src_func_id);
	else if (src_func_id > sss_get_glb_pf_vf_offset(hwdev))
		msg_buffer = sss_get_msg_buffer_from_vf(mbx, src_func_id);
	else
		msg_buffer = sss_get_msg_buffer_from_ppf(mbx, src_func_id);

	if (!msg_buffer)
		return NULL;

	return (direction == SSS_DIRECT_SEND_MSG) ?
	       &msg_buffer->recv_msg : &msg_buffer->resp_msg;
}
/* Fold @data_len bytes (consumed as 32-bit words; a trailing partial
 * word is ignored) into an xor checksum seeded with
 * SSS_MBX_DMA_MSG_INIT_XOR_VAL.
 */
static u32 sss_mbx_dma_data_xor(u32 *data, u16 data_len)
{
	u32 checksum = SSS_MBX_DMA_MSG_INIT_XOR_VAL;
	u32 *cur = data;
	u32 *end = data + data_len / sizeof(u32);

	while (cur < end)
		checksum ^= *cur++;

	return checksum;
}
/* Copy @data into the DMA slot selected by the queue's producer index
 * and fill @dma_msg with that slot's address, length and xor checksum.
 *
 * NOTE(review): the checksum covers data_len rounded up to 4 bytes, so
 * for unaligned lengths a few stale slot bytes are included — presumably
 * the consumer applies the same rounding; confirm against firmware.
 */
static void sss_mbx_fill_dma_msg_buf(struct sss_mbx_dma_queue *queue,
				     struct sss_mbx_dma_msg *dma_msg,
				     void *data, u16 data_len)
{
	u64 pi;
	u64 dma_paddr;
	void *dma_vaddr;

	/* Each slot is SSS_MBX_BUF_SIZE_MAX bytes wide. */
	pi = queue->pi * SSS_MBX_BUF_SIZE_MAX;
	dma_vaddr = (u8 *)queue->dma_buff_vaddr + pi;
	dma_paddr = queue->dma_buff_paddr + pi;
	memcpy(dma_vaddr, data, data_len);

	dma_msg->dma_addr_h = upper_32_bits(dma_paddr);
	dma_msg->dma_addr_l = lower_32_bits(dma_paddr);
	dma_msg->msg_len = data_len;
	dma_msg->xor = sss_mbx_dma_data_xor(dma_vaddr,
					    ALIGN(data_len, SSS_MBX_XOR_DATA_ALIGN));
}
/* Pick the DMA message queue matching @ack_type (sync for acked
 * messages, async otherwise), refresh its consumer index from the
 * hardware CI register, and reject the request when the queue is full.
 *
 * Return: the selected queue, or NULL when it has no free slot.
 *
 * Fix: the busy log used to claim "sync mq" unconditionally, which is
 * misleading when the async queue is the one that filled up; report the
 * actual queue instead.
 */
static struct sss_mbx_dma_queue *
sss_get_mbx_dma_queue(struct sss_mbx *mbx,
		      enum sss_msg_ack_type ack_type)
{
	u32 val;
	struct sss_mbx_dma_queue *queue = NULL;

	/* Both CIs live in one register; read once, extract per queue. */
	val = sss_chip_read_reg(SSS_TO_HWDEV(mbx)->hwif, SSS_MBX_MQ_CI_OFF);
	if (ack_type == SSS_MSG_ACK) {
		queue = &mbx->sync_msg_queue;
		queue->ci = SSS_GET_MBX_MQ_CI(val, SYNC);
	} else {
		queue = &mbx->async_msg_queue;
		queue->ci = SSS_GET_MBX_MQ_CI(val, ASYNC);
	}

	if (SSS_IS_MSG_QUEUE_FULL(queue)) {
		sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Mbx %s mq is busy, pi: %u, ci: %u\n",
			(ack_type == SSS_MSG_ACK) ? "sync" : "async",
			queue->pi, SSS_MQ_ID_MASK(queue, queue->ci));
		return NULL;
	}

	return queue;
}
/* Stage one message body in the DMA queue, then advance the producer
 * index (wrapping at queue depth via SSS_MQ_ID_MASK).
 */
static void sss_fill_mbx_msg_body(struct sss_mbx_dma_queue *queue,
				  struct sss_mbx_dma_msg *dma_msg, void *msg_body, u16 body_len)
{
	sss_mbx_fill_dma_msg_buf(queue, dma_msg, msg_body, body_len);
	queue->pi = SSS_MQ_ID_MASK(queue, queue->pi + 1);
}
/* Reset the write-back status word before a new send, so the poller in
 * sss_check_mbx_wb_status() cannot see a stale "finished" value.
 */
static void sss_clear_mbx_status(struct sss_mbx_send *send_mbx)
{
	*send_mbx->wb_state = 0;

	/* clear mbx wb state */
	wmb();	/* ensure the clear lands before the hardware is triggered */
}
/* Write the 8-byte message header into the mailbox send window as
 * big-endian 32-bit words, using raw MMIO writes (no implicit barrier).
 */
static void sss_chip_send_mbx_msg_header(struct sss_hwdev *hwdev,
					 struct sss_mbx_send *send_mbx, u64 *msg_header)
{
	u32 i;
	u32 *header = (u32 *)msg_header;
	u32 cnt = SSS_MBX_HEADER_SIZE / sizeof(u32);

	for (i = 0; i < cnt; i++)
		__raw_writel(cpu_to_be32(*(header + i)), send_mbx->data + i * sizeof(u32));
}
/* Write the segment payload just after the header in the send window,
 * big-endian 32-bit words.  Non-multiple-of-4 lengths are staged through
 * a zero-padded stack buffer so the final partial word is well-defined.
 */
static void sss_chip_send_mbx_msg_body(struct sss_hwdev *hwdev,
				       struct sss_mbx_send *send_mbx, void *body, u16 body_len)
{
	u32 *msg_data = body;
	u32 size = sizeof(u32);
	u32 i;
	u8 buf[SSS_MBX_SEG_SIZE] = {0};
	u32 cnt = ALIGN(body_len, size) / size;

	if (body_len % size != 0) {
		memcpy(buf, body, body_len);
		msg_data = (u32 *)buf;
	}

	for (i = 0; i < cnt; i++) {
		__raw_writel(cpu_to_be32(*(msg_data + i)),
			     send_mbx->data + SSS_MBX_HEADER_SIZE + i * size);
	}
}
/* Program the interrupt attributes and then the control register that
 * actually kicks off the transfer.  The two wmb()s enforce that the
 * interrupt attributes are visible before the send is triggered, and
 * that the trigger itself is posted before the caller starts polling.
 */
static void sss_chip_write_mbx_msg_attr(struct sss_mbx *mbx,
					u16 dest, u16 aeq_num, u16 seg_len)
{
	u16 size;
	u16 dest_func_id;
	u32 intr;
	u32 ctrl;

	/* TX size is expressed in 4-byte units, header included. */
	size = ALIGN(seg_len + SSS_MBX_HEADER_SIZE, SSS_MBX_SEG_LEN_ALIGN) >> 2;
	intr = SSS_SET_MBX_INT(aeq_num, DST_AEQN) |
	       SSS_SET_MBX_INT(0, SRC_RESP_AEQN) |
	       SSS_SET_MBX_INT(SSS_NO_DMA_ATTR, STAT_DMA) |
	       SSS_SET_MBX_INT(size, TX_SIZE) |
	       SSS_SET_MBX_INT(SSS_MBX_STRONG_ORDER, STAT_DMA_SO_RO) |
	       SSS_SET_MBX_INT(SSS_MBX_WB, WB_EN);
	sss_chip_write_reg(SSS_TO_HWDEV(mbx)->hwif,
			   SSS_HW_CSR_MBX_INT_OFFSET_OFF, intr);

	/* make sure write mbx intr attr reg */
	wmb();

	/* A VF can only address the mgmt CPU or its own PF (encoded 0). */
	dest_func_id = (SSS_IS_VF(SSS_TO_HWDEV(mbx)) && dest != SSS_MGMT_SRC_ID) ? 0 : dest;
	ctrl = SSS_SET_MBX_CTRL(SSS_MBX_TX_NOT_COMPLETE, TX_STATUS) |
	       SSS_SET_MBX_CTRL(SSS_MBX_NOT_TRIG, TRIGGER_AEQE) |
	       SSS_SET_MBX_CTRL(dest_func_id, DST_FUNC);
	sss_chip_write_reg(SSS_TO_HWDEV(mbx)->hwif,
			   SSS_HW_CSR_MBX_CTRL_OFF, ctrl);

	/* make sure write mbx ctrl reg */
	wmb();
}
/* Diagnostic helper: log the mailbox control and interrupt-attribute
 * registers after a send failure.
 */
static void sss_dump_mbx_reg(struct sss_hwdev *hwdev)
{
	u32 ctrl_val = sss_chip_read_reg(hwdev->hwif, SSS_HW_CSR_MBX_CTRL_OFF);
	u32 intr_val = sss_chip_read_reg(hwdev->hwif, SSS_HW_CSR_MBX_INT_OFFSET_OFF);

	sdk_err(hwdev->dev_hdl, "Mbx ctrl reg:0x%x, intr offset:0x%x\n",
		ctrl_val, intr_val);
}
/* Read the 16-bit status/errcode field from the DMA write-back word.
 * NOTE(review): the rmb() sits after the load; it orders this read
 * against subsequent reads rather than preceding ones — appears
 * intentional for the polling loop, confirm against the original
 * driver's barrier scheme before changing.
 */
static u16 sss_get_mbx_status(const struct sss_mbx_send *send_mbx)
{
	u64 val = be64_to_cpu(*send_mbx->wb_state);

	/* read wb state before returning it */
	rmb();

	return (u16)(val & SSS_MBX_WB_STATUS_ERRCODE_MASK);
}
/* Poll callback for sss_check_handler_timeout(): reports OK once the
 * hardware write-back status flips to "finished", ERR when the channel
 * is stopped or the chip vanished, DOING otherwise.
 */
static enum sss_process_ret sss_check_mbx_wb_status(void *priv_data)
{
	struct sss_mbx *mbx = priv_data;
	u16 wb_status;

	if (SSS_MBX_MSG_CHN_STOP(mbx) || !SSS_TO_HWDEV(mbx)->chip_present_flag)
		return SSS_PROCESS_ERR;

	wb_status = sss_get_mbx_status(&mbx->mbx_send);
	if (SSS_MBX_STATUS_FINISHED(wb_status))
		return SSS_PROCESS_OK;

	return SSS_PROCESS_DOING;
}
/* Push one mailbox segment to the chip and poll its DMA write-back
 * status until it reports finished.
 *
 * Responses are steered to the response AEQ when enough AEQs exist,
 * otherwise everything shares the async AEQ.
 *
 * Return: 0 on success, -ETIMEDOUT on poll timeout, the hardware
 * errcode (or -EFAULT when it is zero) on a failed completion.
 */
static int sss_chip_send_mbx_fragment(struct sss_mbx *mbx, u16 dest_func_id,
				      u64 msg_header, void *msg_body, u16 body_len)
{
	u16 aeq_type;
	u16 status = 0;
	u16 err_code;
	u16 direction;
	int ret;
	struct sss_mbx_send *send_mbx = &mbx->mbx_send;
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);

	direction = SSS_GET_MSG_HEADER(msg_header, DIRECTION);
	aeq_type = (SSS_GET_HWIF_AEQ_NUM(hwdev->hwif) > SSS_MBX_RSP_MSG_AEQ &&
		    direction != SSS_DIRECT_SEND_MSG) ? SSS_MBX_RSP_MSG_AEQ : SSS_ASYNC_MSG_AEQ;

	/* Order matters: clear status, write header + body into the send
	 * window, then the attr write triggers the transfer.
	 */
	sss_clear_mbx_status(send_mbx);

	sss_chip_send_mbx_msg_header(hwdev, send_mbx, &msg_header);

	sss_chip_send_mbx_msg_body(hwdev, send_mbx, msg_body, body_len);

	sss_chip_write_mbx_msg_attr(mbx, dest_func_id, aeq_type, body_len);

	ret = sss_check_handler_timeout(mbx, sss_check_mbx_wb_status,
					SSS_MBX_MSG_POLL_TIMEOUT_MS, USEC_PER_MSEC);
	status = sss_get_mbx_status(send_mbx);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Send mbx seg timeout, wb status: 0x%x\n", status);
		sss_dump_mbx_reg(hwdev);
		return -ETIMEDOUT;
	}

	if (!SSS_MBX_STATUS_SUCCESS(status)) {
		sdk_err(hwdev->dev_hdl, "Fail to send mbx seg to func %u, wb status: 0x%x\n",
			dest_func_id, status);
		err_code = SSS_MBX_STATUS_ERRCODE(status);
		return (err_code != 0) ? err_code : -EFAULT;
	}

	return 0;
}
/* Split a message body into SSS_MBX_SEG_SIZE fragments and send them in
 * order.  For the final fragment the header's SEG_LEN field is patched
 * to the remaining length and the LAST flag is set; the SEQID field is
 * bumped after every fragment.
 *
 * Return: 0 on success, or the first fragment's error code.
 */
static int sss_send_mbx_to_chip(struct sss_mbx *mbx, u16 dest_func_id,
				u64 msg_header, u8 *msg_body, u16 body_len)
{
	int ret;
	u16 seg_len = SSS_MBX_SEG_SIZE;
	u32 seq_id = 0;
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);

	while (body_len > 0) {
		if (body_len <= SSS_MBX_SEG_SIZE) {
			/* Last fragment: shrink SEG_LEN and mark LAST. */
			msg_header &= ~SSS_MBX_SEGLEN_MASK;
			msg_header |= SSS_SET_MSG_HEADER(body_len, SEG_LEN);
			msg_header |= SSS_SET_MSG_HEADER(SSS_LAST_SEG, LAST);
			seg_len = body_len;
		}

		ret = sss_chip_send_mbx_fragment(mbx, dest_func_id, msg_header, msg_body, seg_len);
		if (ret != 0) {
			sdk_err(hwdev->dev_hdl, "Fail to send mbx seg, seq_id=0x%llx\n",
				SSS_GET_MSG_HEADER(msg_header, SEQID));
			return ret;
		}

		seq_id++;
		msg_body += seg_len;
		body_len -= seg_len;

		msg_header &= ~(SSS_SET_MSG_HEADER(SSS_MSG_HEADER_SEQID_MASK, SEQID));
		msg_header |= SSS_SET_MSG_HEADER(seq_id, SEQID);
	}

	return 0;
}
/* Build and transmit one mailbox message under msg_send_lock.
 *
 * Messages to the mgmt CPU on chips without segment support are staged
 * in a DMA queue: the original payload is replaced by a small
 * sss_mbx_dma_msg descriptor pointing at the DMA buffer.  Everything
 * else is sent inline, fragmented by sss_send_mbx_to_chip().
 *
 * Return: 0 on success, -EBUSY when the DMA queue is full, or the
 * underlying send error.
 */
int sss_send_mbx_msg(struct sss_mbx *mbx, u8 mod, u16 cmd, void *msg,
		     u16 msg_len, u16 dest_func_id, enum sss_msg_direction_type direction,
		     enum sss_msg_ack_type ack_type, struct sss_mbx_msg_info *msg_info)
{
	u8 *msg_body = NULL;
	u64 msg_header = 0;
	int ret = 0;
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);
	struct sss_mbx_dma_msg msg_dma = {0};
	enum sss_data_type type = SSS_INLINE_DATA;
	struct sss_mbx_dma_queue *queue = NULL;

	mutex_lock(&mbx->msg_send_lock);

	if (SSS_IS_DMA_MBX_MSG(dest_func_id) && !SSS_SUPPORT_MBX_SEGMENT(hwdev)) {
		queue = sss_get_mbx_dma_queue(mbx, ack_type);
		if (!queue) {
			ret = -EBUSY;
			goto out;
		}

		/* Swap the payload for the DMA descriptor. */
		sss_fill_mbx_msg_body(queue, &msg_dma, msg, msg_len);

		type = SSS_DMA_DATA;
		msg = &msg_dma;
		msg_len = sizeof(msg_dma);
	}

	msg_body = (u8 *)msg;
	msg_header = SSS_FILL_MSG_HEADER(hwdev, msg_info, msg_len, mod,
					 ack_type, type, direction, cmd);

	ret = sss_send_mbx_to_chip(mbx, dest_func_id, msg_header, msg_body, msg_len);

out:
	mutex_unlock(&mbx->msg_send_lock);

	return ret;
}
/* Update the send-completion state under mbx_lock, which serializes the
 * sender against the AEQ completion path that also writes event_flag.
 */
static void sss_set_mbx_event_flag(struct sss_mbx *mbx,
				   enum sss_mbx_event_state event_flag)
{
	spin_lock(&mbx->mbx_lock);
	mbx->event_flag = event_flag;
	spin_unlock(&mbx->mbx_lock);
}
/* Poll callback: OK once the completion path marks the event a success,
 * ERR when the channel is stopped or the chip is gone, DOING otherwise.
 */
static enum sss_process_ret check_mbx_msg_finish(void *priv_data)
{
	struct sss_mbx *mbx = priv_data;

	if (SSS_MBX_MSG_CHN_STOP(mbx) || SSS_TO_HWDEV(mbx)->chip_present_flag == 0)
		return SSS_PROCESS_ERR;

	if (mbx->event_flag == SSS_EVENT_SUCCESS)
		return SSS_PROCESS_OK;

	return SSS_PROCESS_DOING;
}
/* Wait for the response event; @timeout in ms, 0 selects the default
 * SSS_MBX_COMPLETE_WAIT_TIME_MS.  The event flag is advanced to TIMEOUT
 * or END so the completion path knows whether anyone is still waiting.
 */
static int sss_wait_mbx_msg_completion(struct sss_mbx *mbx, u32 timeout)
{
	u32 wait_time = (timeout != 0) ? timeout : SSS_MBX_COMPLETE_WAIT_TIME_MS;

	if (sss_check_handler_timeout(mbx, check_mbx_msg_finish,
				      wait_time, USEC_PER_MSEC) != 0) {
		sss_set_mbx_event_flag(mbx, SSS_EVENT_TIMEOUT);
		return -ETIMEDOUT;
	}

	sss_set_mbx_event_flag(mbx, SSS_EVENT_END);

	return 0;
}
/* Acquire the mailbox send lock, honoring per-channel lockdown.
 *
 * With lockdown disabled this is a plain mutex_lock().  With lockdown
 * enabled we spin with trylock so that, should the channel be stopped
 * while we wait, we can bail out with -EAGAIN instead of blocking on a
 * channel that will never drain.
 */
static int sss_send_mbx_msg_lock(struct sss_mbx *mbx, u16 channel)
{
	if (!mbx->lock_channel_en) {
		mutex_lock(&mbx->mbx_send_lock);
		return 0;
	}

	while (test_bit(channel, &mbx->channel_stop) == 0) {
		if (mutex_trylock(&mbx->mbx_send_lock) != 0)
			return 0;

		usleep_range(SSS_MBX_TRY_LOCK_SLEPP_US - 1, SSS_MBX_TRY_LOCK_SLEPP_US);
	}

	/* Channel was stopped while we were waiting. */
	return -EAGAIN;
}
/* Release the lock taken by sss_send_mbx_msg_lock(). */
static void sss_send_mbx_msg_unlock(struct sss_mbx *mbx)
{
	mutex_unlock(&mbx->mbx_send_lock);
}
/* Synchronous mailbox request/response to another function.
 *
 * Sends @buf_in to @dest_func_id under the channel-aware send lock,
 * waits for the completion event, then validates the response descriptor
 * (mod/cmd must echo the request) and copies the payload into @buf_out
 * when the caller provided one.  *out_size is updated to the actual
 * response length.
 *
 * Return: 0 on success; -EPERM chip gone; -EFAULT unknown destination or
 * mismatched/oversized response; -EAGAIN channel stopped; -ETIMEDOUT no
 * completion; or the peer's reported state.
 */
int sss_send_mbx_to_func(struct sss_mbx *mbx, u8 mod, u16 cmd,
			 u16 dest_func_id, void *buf_in, u16 in_size, void *buf_out,
			 u16 *out_size, u32 timeout, u16 channel)
{
	struct sss_msg_desc *msg_desc = NULL;
	struct sss_mbx_msg_info msg_info = {0};
	int ret;

	if (SSS_TO_HWDEV(mbx)->chip_present_flag == 0)
		return -EPERM;

	/* Response descriptor for this peer; NULL means unknown peer. */
	msg_desc = sss_get_mbx_msg_desc(mbx, dest_func_id, SSS_RESP_MSG);
	if (!msg_desc)
		return -EFAULT;

	ret = sss_send_mbx_msg_lock(mbx, channel);
	if (ret != 0)
		return ret;

	mbx->cur_msg_channel = channel;
	SSS_INCREASE_MBX_MSG_ID(mbx);
	sss_set_mbx_event_flag(mbx, SSS_EVENT_START);

	msg_info.msg_id = SSS_MBX_MSG_ID(mbx);
	ret = sss_send_mbx_msg(mbx, mod, cmd, buf_in, in_size, dest_func_id,
			       SSS_DIRECT_SEND_MSG, SSS_MSG_ACK, &msg_info);
	if (ret != 0) {
		sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl,
			"Fail to send mbx mod %u, cmd %u, msg_id: %u, err: %d\n",
			mod, cmd, msg_info.msg_id, ret);
		sss_set_mbx_event_flag(mbx, SSS_EVENT_FAIL);
		goto send_err;
	}

	if (sss_wait_mbx_msg_completion(mbx, timeout)) {
		sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl,
			"Send mbx msg timeout, msg_id: %u\n", msg_info.msg_id);
		sss_dump_aeq_info(SSS_TO_HWDEV(mbx));
		ret = -ETIMEDOUT;
		goto send_err;
	}

	/* The response must echo the request's mod/cmd. */
	if (mod != msg_desc->mod || cmd != msg_desc->cmd) {
		sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl,
			"Invalid response mbx message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n",
			msg_desc->mod, msg_desc->cmd, mod, cmd);
		ret = -EFAULT;
		goto send_err;
	}

	/* Non-zero state is the peer-side error code. */
	if (msg_desc->msg_info.state) {
		ret = msg_desc->msg_info.state;
		goto send_err;
	}

	if (buf_out && out_size) {
		if (*out_size < msg_desc->msg_len) {
			sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl,
				"Invalid response mbx message length: %u for mod %d cmd %u, should less than: %u\n",
				msg_desc->msg_len, mod, cmd, *out_size);
			ret = -EFAULT;
			goto send_err;
		}

		if (msg_desc->msg_len)
			memcpy(buf_out, msg_desc->msg, msg_desc->msg_len);

		*out_size = msg_desc->msg_len;
	}

send_err:
	sss_send_mbx_msg_unlock(mbx);

	return ret;
}
/* Fire-and-forget mailbox send: validate the request, take the
 * channel-aware send lock, transmit without expecting an ack.
 */
int sss_send_mbx_to_func_no_ack(struct sss_hwdev *hwdev, u16 func_id,
				u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel)
{
	struct sss_mbx *mbx = hwdev->mbx;
	struct sss_mbx_msg_info msg_info = {0};
	int err;

	err = sss_check_mbx_param(mbx, buf_in, in_size, channel);
	if (err != 0)
		return err;

	err = sss_send_mbx_msg_lock(mbx, channel);
	if (err != 0)
		return err;

	err = sss_send_mbx_msg(mbx, mod, cmd, buf_in, in_size,
			       func_id, SSS_DIRECT_SEND_MSG, SSS_MSG_NO_ACK, &msg_info);
	if (err != 0)
		sdk_err(hwdev->dev_hdl, "Fail to send mbx no ack\n");

	sss_send_mbx_msg_unlock(mbx);

	return err;
}

View File

@ -0,0 +1,94 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HWIF_MBX_H
#define SSS_HWIF_MBX_H
#include "sss_hw.h"
#include "sss_hwdev.h"
#define SSS_MGMT_SRC_ID 0x1FFF
#define SSS_IS_DMA_MBX_MSG(dest_func_id) ((dest_func_id) == SSS_MGMT_SRC_ID)
#define SSS_MBX_BUF_SIZE_MAX 2048U
#define SSS_MBX_HEADER_SIZE 8
/* MBX size is 64B, 8B for mbx_header, 8B reserved */
#define SSS_MBX_SEG_SIZE 48
#define SSS_MBX_DATA_SIZE (SSS_MBX_BUF_SIZE_MAX - SSS_MBX_HEADER_SIZE)
#define SSS_MBX_MQ_CI_OFF (SSS_CSR_CFG_FLAG + \
SSS_HW_CSR_MBX_DATA_OFF + SSS_MBX_HEADER_SIZE + SSS_MBX_SEG_SIZE)
#define SSS_MBX_MQ_SYNC_CI_SHIFT 0
#define SSS_MBX_MQ_ASYNC_CI_SHIFT 8
#define SSS_MBX_MQ_SYNC_CI_MASK 0xFF
#define SSS_MBX_MQ_ASYNC_CI_MASK 0xFF
#define SSS_GET_MBX_MQ_CI(val, field) \
(((val) >> SSS_MBX_MQ_##field##_CI_SHIFT) & SSS_MBX_MQ_##field##_CI_MASK)
#define SSS_CLEAR_MBX_MQ_CI(val, field) \
((val) & (~(SSS_MBX_MQ_##field##_CI_MASK << SSS_MBX_MQ_##field##_CI_SHIFT)))
/* Recv func mbx msg */
/* Recv func mbx msg — one fully reassembled inbound mailbox message,
 * handed to the registered PF/VF/PPF handlers.
 */
struct sss_recv_mbx {
	void *buf;			/* reassembled message payload */
	u16 buf_len;			/* payload length in bytes */
	u8 msg_id;			/* sender's rolling message id */
	u8 mod;				/* module the message targets */
	u16 cmd;			/* command within the module */
	u16 src_func_id;		/* global function id of the sender */
	enum sss_msg_ack_type ack_type;	/* whether the sender expects a response */
	void *resp_buf;			/* buffer for the response, when acked */
};
enum sss_mbx_cb_state {
SSS_VF_RECV_HANDLER_REG = 0,
SSS_VF_RECV_HANDLER_RUN,
SSS_PF_RECV_HANDLER_REG,
SSS_PF_RECV_HANDLER_RUN,
SSS_PPF_RECV_HANDLER_REG,
SSS_PPF_RECV_HANDLER_RUN,
SSS_PPF_TO_PF_RECV_HANDLER_REG,
SSS_PPF_TO_PF_RECV_HANDLER_RUN,
};
/* Validate a mailbox request before any locking: non-empty payload
 * within SSS_MBX_DATA_SIZE, and a channel id below SSS_CHANNEL_MAX.
 * Check order is part of the observable behavior (it decides which
 * error is logged when several apply) — keep it.
 *
 * Return: 0 when valid, -EINVAL otherwise.
 */
static inline int sss_check_mbx_param(struct sss_mbx *mbx,
				      void *buf_in, u16 in_size, u16 channel)
{
	if (!buf_in || in_size == 0)
		return -EINVAL;

	if (in_size > SSS_MBX_DATA_SIZE) {
		sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl,
			"Mbx msg len %u exceed limit: [1, %u]\n",
			in_size, SSS_MBX_DATA_SIZE);
		return -EINVAL;
	}

	if (channel >= SSS_CHANNEL_MAX) {
		sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl,
			"Invalid channel id: 0x%x\n", channel);
		return -EINVAL;
	}

	return 0;
}
struct sss_msg_desc *sss_get_mbx_msg_desc(struct sss_mbx *mbx, u64 src_func_id, u64 direction);
int sss_send_mbx_msg(struct sss_mbx *mbx, u8 mod, u16 cmd,
void *msg, u16 msg_len, u16 dest, enum sss_msg_direction_type direction_type,
enum sss_msg_ack_type type, struct sss_mbx_msg_info *msg_info);
int sss_send_mbx_to_func(struct sss_mbx *mbx, u8 mod, u16 cmd,
u16 dest_func_id, void *buf_in, u16 in_size,
void *buf_out, u16 *out_size, u32 timeout, u16 channel);
int sss_send_mbx_to_func_no_ack(struct sss_hwdev *hwdev, u16 func_id,
u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel);
#define sss_send_mbx_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size, channel) \
sss_send_mbx_to_func_no_ack(hwdev, SSS_MGMT_SRC_ID, mod, cmd, \
buf_in, in_size, channel)
#endif

View File

@ -0,0 +1,184 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_hwif_mbx.h"
#include "sss_hwif_export.h"
#define SSS_WAIT_CB_COMPLETE_MIN 900
#define SSS_WAIT_CB_COMPLETE_MAX 1000
/* Register the PF-side receive handler for module @mod; the REG bit
 * publishes the callback to the dispatch path.
 */
int sss_register_pf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_pf_mbx_handler_t cb)
{
	struct sss_hwdev *dev = hwdev;
	struct sss_mbx *mbx = NULL;

	if (!dev || mod >= SSS_MOD_TYPE_MAX)
		return -EFAULT;

	mbx = dev->mbx;
	mbx->pf_mbx_data[mod] = pri_handle;
	mbx->pf_mbx_cb[mod] = cb;
	set_bit(SSS_PF_RECV_HANDLER_REG, &mbx->pf_mbx_cb_state[mod]);

	return 0;
}
EXPORT_SYMBOL(sss_register_pf_mbx_handler);
/* Register the VF-side receive handler for module @mod; the REG bit
 * publishes the callback to the dispatch path.
 */
int sss_register_vf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_vf_mbx_handler_t cb)
{
	struct sss_hwdev *dev = hwdev;
	struct sss_mbx *mbx = NULL;

	if (!dev || mod >= SSS_MOD_TYPE_MAX)
		return -EFAULT;

	mbx = dev->mbx;
	mbx->vf_mbx_data[mod] = pri_handle;
	mbx->vf_mbx_cb[mod] = cb;
	set_bit(SSS_VF_RECV_HANDLER_REG, &mbx->vf_mbx_cb_state[mod]);

	return 0;
}
EXPORT_SYMBOL(sss_register_vf_mbx_handler);
/* Unregister the PF receive handler for @mod.
 *
 * Protocol: clear the REG bit so no new invocations start, then wait
 * for any in-flight invocation (RUN bit) to drain before dropping the
 * callback pointers — otherwise the dispatch path could race into a
 * NULL handler.
 */
void sss_unregister_pf_mbx_handler(void *hwdev, u8 mod)
{
	struct sss_mbx *mbx = NULL;

	if (!hwdev || mod >= SSS_MOD_TYPE_MAX)
		return;

	mbx = ((struct sss_hwdev *)hwdev)->mbx;

	clear_bit(SSS_PF_RECV_HANDLER_REG, &mbx->pf_mbx_cb_state[mod]);

	while (test_bit(SSS_PF_RECV_HANDLER_RUN, &mbx->pf_mbx_cb_state[mod]) != 0)
		usleep_range(SSS_WAIT_CB_COMPLETE_MIN, SSS_WAIT_CB_COMPLETE_MAX);

	mbx->pf_mbx_cb[mod] = NULL;
	mbx->pf_mbx_data[mod] = NULL;
}
EXPORT_SYMBOL(sss_unregister_pf_mbx_handler);
/* Unregister the VF receive handler for @mod.  Same clear-REG /
 * wait-for-RUN drain protocol as the PF variant.
 */
void sss_unregister_vf_mbx_handler(void *hwdev, u8 mod)
{
	struct sss_mbx *mbx = NULL;

	if (!hwdev || mod >= SSS_MOD_TYPE_MAX)
		return;

	mbx = ((struct sss_hwdev *)hwdev)->mbx;

	clear_bit(SSS_VF_RECV_HANDLER_REG, &mbx->vf_mbx_cb_state[mod]);

	while (test_bit(SSS_VF_RECV_HANDLER_RUN, &mbx->vf_mbx_cb_state[mod]) != 0)
		usleep_range(SSS_WAIT_CB_COMPLETE_MIN, SSS_WAIT_CB_COMPLETE_MAX);

	mbx->vf_mbx_cb[mod] = NULL;
	mbx->vf_mbx_data[mod] = NULL;
}
EXPORT_SYMBOL(sss_unregister_vf_mbx_handler);
/* VF-only entry point: send a synchronous mailbox request to the
 * parent PF.  Rejects calls from a PF/PPF.
 */
int sss_mbx_send_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in,
		       u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel)
{
	struct sss_hwdev *dev = hwdev;
	int err;

	if (!dev)
		return -EINVAL;

	if (dev->chip_present_flag == 0)
		return -EPERM;

	err = sss_check_mbx_param(dev->mbx, buf_in, in_size, channel);
	if (err != 0)
		return err;

	if (!SSS_IS_VF(dev)) {
		sdk_err(dev->dev_hdl, "Invalid func_type: %d\n",
			SSS_GET_FUNC_TYPE(dev));
		return -EINVAL;
	}

	return sss_send_mbx_to_func(dev->mbx, mod, cmd,
				    sss_get_pf_id_of_vf(dev), buf_in, in_size,
				    buf_out, out_size, timeout, channel);
}
EXPORT_SYMBOL(sss_mbx_send_to_pf);
/* PF-only entry point: send a synchronous mailbox request to one of
 * this PF's VFs, addressed by its 1-based @vf_id.
 */
int sss_mbx_send_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in,
		       u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel)
{
	struct sss_hwdev *dev = hwdev;
	u16 dst_func_id;
	int ret = 0;

	if (!dev)
		return -EINVAL;

	ret = sss_check_mbx_param(dev->mbx, buf_in, in_size, channel);
	if (ret != 0)
		return ret;

	if (SSS_IS_VF(dev)) {
		sdk_err(dev->dev_hdl, "Invalid func_type: %d\n",
			SSS_GET_FUNC_TYPE((struct sss_hwdev *)hwdev));
		return -EINVAL;
	}

	if (vf_id == 0) {
		sdk_err(dev->dev_hdl, "Invalid vf_id: %u\n", vf_id);
		return -EINVAL;
	}

	/* vf_offset_to_pf + vf_id is the vf's global function id of vf in
	 * this pf
	 */
	dst_func_id = sss_get_glb_pf_vf_offset(hwdev) + vf_id;

	return sss_send_mbx_to_func(dev->mbx, mod, cmd,
				    dst_func_id, buf_in, in_size,
				    buf_out, out_size, timeout, channel);
}
EXPORT_SYMBOL(sss_mbx_send_to_vf);
/* Send a synchronous mailbox request to the management CPU.  The
 * SEND_API_ACK_BY_UP command is short-circuited — no round trip needed.
 */
static int sss_send_mbx_to_mgmt(struct sss_hwdev *hwdev, u8 mod, u16 cmd,
				void *buf_in, u16 in_size, void *buf_out, u16 *out_size,
				u32 timeout, u16 channel)
{
	struct sss_mbx *mbx = hwdev->mbx;
	int err;

	err = sss_check_mbx_param(mbx, buf_in, in_size, channel);
	if (err != 0)
		return err;

	if (mod == SSS_MOD_TYPE_COMM && cmd == SSS_COMM_MGMT_CMD_SEND_API_ACK_BY_UP)
		return 0;

	return sss_send_mbx_to_func(mbx, mod, cmd, SSS_MGMT_SRC_ID,
				    buf_in, in_size, buf_out, out_size, timeout, channel);
}
/* Public synchronous send to the management CPU.  Rejects requests when
 * the chip is no longer present.
 */
int sss_sync_mbx_send_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in,
			  u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel)
{
	if (!hwdev)
		return -EINVAL;

	if (sss_get_dev_present_flag(hwdev) == 0)
		return -EPERM;

	return sss_send_mbx_to_mgmt(hwdev, mod, cmd, buf_in, in_size,
				    buf_out, out_size, timeout, channel);
}
EXPORT_SYMBOL(sss_sync_mbx_send_msg);

View File

@ -0,0 +1,888 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include "sss_kernel.h"
#include "sss_hw_common.h"
#include "sss_hwdev.h"
#include "sss_hwif_api.h"
#include "sss_hwif_eq.h"
#include "sss_hwif_mbx.h"
#include "sss_csr.h"
#include "sss_common.h"
#include "sss_adapter_mgmt.h"
#define SSS_MBX_WB_STATUS_SIZE 16UL
#define SSS_MBX_DMA_MSG_QUEUE_DEPTH 32
#define SSS_MBX_WQ_NAME "sss_mbx"
#define SSS_MBX_AREA(hwif) \
((hwif)->cfg_reg_base + SSS_HW_CSR_MBX_DATA_OFF)
#define SSS_GET_MBX_BODY(header) ((u8 *)(header) + SSS_MBX_HEADER_SIZE)
#define SSS_MBX_LAST_SEG_MAX_SIZE \
(SSS_MBX_BUF_SIZE_MAX - SSS_MAX_SEG_ID * SSS_MBX_SEG_SIZE)
#define SSS_MSG_PROCESS_CNT_MAX 10
#define SSS_SRC_IS_PF_OR_PPF(hwdev, src_func_id) \
((src_func_id) < SSS_MAX_PF_NUM(hwdev))
#define SSS_MBX_MSG_NO_DATA_SIZE 1
#define SSS_MBX_PF_SEND_ERR 0x1
#define SSS_MAX_SEG_ID 42
/* Deferred-processing context queued on the mailbox workqueue: one
 * received message together with the buffers it will be handled with.
 */
struct sss_mbx_work {
	struct work_struct work;		/* workqueue hook */
	struct sss_mbx *mbx;			/* owning mailbox */
	struct sss_recv_mbx *recv_mbx;		/* the message to process */
	struct sss_msg_buffer *msg_buffer;	/* per-source buffer the message came from */
};
/* Allocate one DMA message queue: depth slots of SSS_MBX_BUF_SIZE_MAX
 * bytes of coherent, zeroed DMA memory.
 *
 * dma_zalloc_coherent() is the compat wrapper provided by
 * sss_linux_kernel.h — keep it for older-kernel builds.
 */
static int sss_alloc_mbx_mq_dma_buf(struct sss_hwdev *hwdev, struct sss_mbx_dma_queue *mq)
{
	u32 size;

	size = mq->depth * SSS_MBX_BUF_SIZE_MAX;
	mq->dma_buff_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, size, &mq->dma_buff_paddr,
						 GFP_KERNEL);
	if (!mq->dma_buff_vaddr) {
		sdk_err(hwdev->dev_hdl, "Fail to alloc dma_buffer\n");
		return -ENOMEM;
	}

	return 0;
}
static void sss_free_mbx_mq_dma_buf(struct sss_hwdev *hwdev, struct sss_mbx_dma_queue *mq)
{
dma_free_coherent(hwdev->dev_hdl, mq->depth * SSS_MBX_BUF_SIZE_MAX,
mq->dma_buff_vaddr, mq->dma_buff_paddr);
mq->dma_buff_vaddr = NULL;
mq->dma_buff_paddr = 0;
}
/* Allocate both DMA message queues (sync and async); on partial failure
 * the sync queue is rolled back.
 */
static int sss_mbx_alloc_mq_dma_addr(struct sss_mbx *mbx)
{
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);
	int err;

	err = sss_alloc_mbx_mq_dma_buf(hwdev, &mbx->sync_msg_queue);
	if (err != 0)
		return err;

	err = sss_alloc_mbx_mq_dma_buf(hwdev, &mbx->async_msg_queue);
	if (err != 0) {
		sss_free_mbx_mq_dma_buf(hwdev, &mbx->sync_msg_queue);
		return err;
	}

	return 0;
}
/* Release both DMA message queues. */
static void sss_mbx_free_mq_dma_addr(struct sss_mbx *mbx)
{
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);

	sss_free_mbx_mq_dma_buf(hwdev, &mbx->sync_msg_queue);
	sss_free_mbx_mq_dma_buf(hwdev, &mbx->async_msg_queue);
}
/* Allocate the coherent write-back area the hardware uses to report
 * send completion status (SSS_MBX_WB_STATUS_SIZE bytes; only the first
 * word is consumed — see sss_get_mbx_status()).
 */
static int sss_mbx_alloc_mq_wb_addr(struct sss_mbx *mbx)
{
	struct sss_mbx_send *send_mbx = &mbx->mbx_send;
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);

	send_mbx->wb_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, SSS_MBX_WB_STATUS_SIZE,
						 &send_mbx->wb_paddr, GFP_KERNEL);
	if (!send_mbx->wb_vaddr)
		return -ENOMEM;

	send_mbx->wb_state = send_mbx->wb_vaddr;

	return 0;
}
/* Release the hardware write-back status area. */
static void sss_mbx_free_mq_wb_addr(struct sss_mbx *mbx)
{
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);
	struct sss_mbx_send *send_mbx = &mbx->mbx_send;

	dma_free_coherent(hwdev->dev_hdl, SSS_MBX_WB_STATUS_SIZE,
			  send_mbx->wb_vaddr, send_mbx->wb_paddr);
	send_mbx->wb_vaddr = NULL;
}
/* Allocate recv/resp buffers for one message-buffer pair and reset its
 * bookkeeping (count 0, seq_id parked at SSS_MAX_SEG_ID).
 */
static int sss_alloc_mbx_msg_buffer(struct sss_msg_buffer *msg_buffer)
{
	msg_buffer->resp_msg.msg = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL);
	if (!msg_buffer->resp_msg.msg)
		return -ENOMEM;

	msg_buffer->recv_msg.msg = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL);
	if (!msg_buffer->recv_msg.msg) {
		kfree(msg_buffer->resp_msg.msg);
		msg_buffer->resp_msg.msg = NULL;
		return -ENOMEM;
	}

	msg_buffer->recv_msg.seq_id = SSS_MAX_SEG_ID;
	msg_buffer->resp_msg.seq_id = SSS_MAX_SEG_ID;
	atomic_set(&msg_buffer->recv_msg_cnt, 0);

	return 0;
}
/* Release both halves of a message-buffer pair. */
static void sss_free_mbx_msg_buffer(struct sss_msg_buffer *msg_buffer)
{
	kfree(msg_buffer->resp_msg.msg);
	msg_buffer->resp_msg.msg = NULL;
	kfree(msg_buffer->recv_msg.msg);
	msg_buffer->recv_msg.msg = NULL;
}
static int sss_mbx_alloc_dma_addr(struct sss_mbx *sss_mbx)
{
int ret;
ret = sss_mbx_alloc_mq_dma_addr(sss_mbx);
if (ret != 0) {
sdk_err(SSS_TO_HWDEV(sss_mbx)->dev_hdl, "Fail to alloc mbx dma queue\n");
return -ENOMEM;
}
ret = sss_mbx_alloc_mq_wb_addr(sss_mbx);
if (ret != 0) {
sdk_err(SSS_TO_HWDEV(sss_mbx)->dev_hdl, "Fail to init mbx dma wb addr\n");
goto alloc_dma_wb_addr_err;
}
return 0;
alloc_dma_wb_addr_err:
sss_mbx_free_mq_dma_addr(sss_mbx);
return -ENOMEM;
}
/* Tear down all mailbox DMA memory (reverse of sss_mbx_alloc_dma_addr). */
static void sss_mbx_free_dma_addr(struct sss_mbx *mbx)
{
	sss_mbx_free_mq_wb_addr(mbx);
	sss_mbx_free_mq_dma_addr(mbx);
}
/* Initialize the core mailbox state: locks, DMA queue depths, the
 * receive workqueue, the mgmt message buffer, and all DMA areas.
 *
 * Return: 0 on success, -ENOMEM on any allocation failure (with the
 * already-created resources unwound in reverse order).
 */
static int sss_init_mbx_info(struct sss_mbx *mbx)
{
	int ret;

	mutex_init(&mbx->mbx_send_lock);
	mutex_init(&mbx->msg_send_lock);
	spin_lock_init(&mbx->mbx_lock);
	mbx->sync_msg_queue.depth = SSS_MBX_DMA_MSG_QUEUE_DEPTH;
	mbx->async_msg_queue.depth = SSS_MBX_DMA_MSG_QUEUE_DEPTH;

	/* Single-threaded: received messages are processed in order. */
	mbx->workq = create_singlethread_workqueue(SSS_MBX_WQ_NAME);
	if (!mbx->workq) {
		sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Fail to create mbx workq\n");
		return -ENOMEM;
	}

	ret = sss_alloc_mbx_msg_buffer(&mbx->mgmt_msg);
	if (ret != 0) {
		sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Fail to alloc mgmt message buffer\n");
		goto alloc_mbx_msg_buffer_err;
	}

	ret = sss_mbx_alloc_dma_addr(mbx);
	if (ret != 0) {
		sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Fail to alloc dma addr\n");
		goto mbx_alloc_dma_addr_err;
	}

	return 0;

mbx_alloc_dma_addr_err:
	sss_free_mbx_msg_buffer(&mbx->mgmt_msg);
alloc_mbx_msg_buffer_err:
	destroy_workqueue(mbx->workq);

	return -ENOMEM;
}
/* Tear down what sss_init_mbx_info() set up. The workq pointer may already
 * be NULL when sss_hwif_deinit_mbx() destroyed it first, hence the check.
 */
static void sss_deinit_mbx_info(struct sss_mbx *mbx)
{
	if (mbx->workq) {
		destroy_workqueue(mbx->workq);
		mbx->workq = NULL;
	}

	sss_mbx_free_dma_addr(mbx);
	sss_free_mbx_msg_buffer(&mbx->mgmt_msg);
}
/* Allocate (or validate an already existing) per-function message array.
 *
 * Return: 0 on success or when an array of the same size already exists,
 * -EFAULT when an existing array has a different size, -ENOMEM on
 * allocation failure.
 */
static int sss_alloc_func_mbx_msg(struct sss_mbx *mbx, u16 func_num)
{
	if (mbx->func_msg)
		return (mbx->num_func_msg == func_num) ? 0 : -EFAULT;

	mbx->func_msg = kcalloc(func_num, sizeof(*mbx->func_msg), GFP_KERNEL);
	if (!mbx->func_msg)
		return -ENOMEM;

	return 0;
}
/* Free the per-function message array; NULL it to allow re-allocation. */
static void sss_free_func_mbx_msg(struct sss_mbx *mbx)
{
	kfree(mbx->func_msg);
	mbx->func_msg = NULL;
}
/* Allocate one mailbox message buffer pair for each of @func_num functions.
 *
 * @hwdev:    hw device handle (struct sss_hwdev *)
 * @func_num: number of functions to serve; must be in (0, SSS_MAX_FUNC]
 *
 * If the per-function array already exists it is only validated against
 * @func_num by sss_alloc_func_mbx_msg().
 *
 * Return: 0 on success, -EINVAL on bad input, -EFAULT on size mismatch
 * with an existing array, -ENOMEM on allocation failure.
 */
int sss_init_func_mbx_msg(void *hwdev, u16 func_num)
{
	u16 i;
	u16 cnt;
	int ret;
	struct sss_hwdev *dev = hwdev;
	struct sss_mbx *mbx = NULL;

	/* Validate before touching dev: the original code read dev->mbx
	 * ahead of the !hwdev check, a NULL dereference on bad input.
	 */
	if (!hwdev || func_num == 0 || func_num > SSS_MAX_FUNC)
		return -EINVAL;

	mbx = dev->mbx;

	ret = sss_alloc_func_mbx_msg(mbx, func_num);
	if (ret != 0) {
		sdk_err(dev->dev_hdl, "Fail to alloc func msg\n");
		return ret;
	}

	for (cnt = 0; cnt < func_num; cnt++) {
		ret = sss_alloc_mbx_msg_buffer(&mbx->func_msg[cnt]);
		if (ret != 0) {
			sdk_err(dev->dev_hdl, "Fail to alloc func %hu msg buf\n", cnt);
			goto alloc_mbx_msg_buf_err;
		}
	}

	mbx->num_func_msg = func_num;

	return 0;

alloc_mbx_msg_buf_err:
	/* roll back only the channels that were successfully allocated */
	for (i = 0; i < cnt; i++)
		sss_free_mbx_msg_buffer(&mbx->func_msg[i]);

	sss_free_func_mbx_msg(mbx);

	return -ENOMEM;
}
/* Free every per-function message channel, then the array itself.
 * Safe to call when nothing was allocated.
 */
static void sss_deinit_func_mbx_msg(struct sss_mbx *mbx)
{
	u16 i;

	if (!mbx->func_msg)
		return;

	for (i = 0; i < mbx->num_func_msg; i++)
		sss_free_mbx_msg_buffer(&mbx->func_msg[i]);

	sss_free_func_mbx_msg(mbx);
}
/* Clear the sync and async consumer indexes of the mailbox message queue
 * in the chip's CI register (read-modify-write).
 */
static void sss_chip_reset_mbx_ci(struct sss_mbx *mbx)
{
	u32 val;

	val = sss_chip_read_reg(SSS_TO_HWDEV(mbx)->hwif, SSS_MBX_MQ_CI_OFF);
	val = SSS_CLEAR_MBX_MQ_CI(val, SYNC);
	val = SSS_CLEAR_MBX_MQ_CI(val, ASYNC);

	sss_chip_write_reg(SSS_TO_HWDEV(mbx)->hwif, SSS_MBX_MQ_CI_OFF, val);
}
/* Program the physical address of the write-back status area into the
 * chip (split into high/low 32-bit CSR registers).
 */
static void sss_chip_set_mbx_wb_attr(struct sss_mbx *mbx)
{
	u32 addr_h;
	u32 addr_l;
	struct sss_mbx_send *send_mbx = &mbx->mbx_send;
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);

	addr_h = upper_32_bits(send_mbx->wb_paddr);
	addr_l = lower_32_bits(send_mbx->wb_paddr);

	sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_H_OFF, addr_h);
	sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_L_OFF, addr_l);
}
/* Configure the chip-side mailbox attributes: reset consumer indexes,
 * then program the write-back address.
 */
static void sss_chip_set_mbx_attr(struct sss_mbx *mbx)
{
	sss_chip_reset_mbx_ci(mbx);
	sss_chip_set_mbx_wb_attr(mbx);
}
/* Zero the write-back address registers so the chip stops writing status
 * to (soon-to-be-freed) host memory.
 */
static void sss_chip_reset_mbx_attr(struct sss_mbx *sss_mbx)
{
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(sss_mbx);

	sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_H_OFF, 0);
	sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_L_OFF, 0);
}
/* Point the send mailbox at the BAR-mapped mailbox area of this function. */
static void sss_prepare_send_mbx(struct sss_mbx *mbx)
{
	struct sss_mbx_send *send_mbx = &mbx->mbx_send;

	send_mbx->data = SSS_MBX_AREA(SSS_TO_HWDEV(mbx)->hwif);
}
/* Allocate one message channel per host for host-to-host messaging.
 * A zero host count is not an error — h2h messaging simply stays
 * disabled (support_h2h_msg remains false).
 *
 * Return: 0 on success or when no hosts exist, -ENOMEM on failure
 * (already-allocated channels are rolled back).
 */
static int sss_alloc_host_msg(struct sss_hwdev *hwdev)
{
	int i;
	int ret;
	int host_id;
	u8 max_host = SSS_MAX_HOST_NUM(hwdev);
	struct sss_mbx *mbx = hwdev->mbx;

	if (max_host == 0)
		return 0;

	mbx->host_msg = kcalloc(max_host, sizeof(*mbx->host_msg), GFP_KERNEL);
	if (!mbx->host_msg)
		return -ENOMEM;

	for (host_id = 0; host_id < max_host; host_id++) {
		ret = sss_alloc_mbx_msg_buffer(&mbx->host_msg[host_id]);
		if (ret != 0) {
			sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl,
				"Fail to alloc host %d msg channel\n", host_id);
			goto out;
		}
	}

	mbx->support_h2h_msg = true;

	return 0;

out:
	/* host_id is the first index that failed; free everything before it */
	for (i = 0; i < host_id; i++)
		sss_free_mbx_msg_buffer(&mbx->host_msg[i]);

	kfree(mbx->host_msg);
	mbx->host_msg = NULL;

	return -ENOMEM;
}
/* Free all host-to-host message channels; no-op if none were allocated. */
static void sss_free_host_msg(struct sss_mbx *mbx)
{
	int i;

	if (!mbx->host_msg)
		return;

	for (i = 0; i < SSS_MAX_HOST_NUM(SSS_TO_HWDEV(mbx)); i++)
		sss_free_mbx_msg_buffer(&mbx->host_msg[i]);

	kfree(mbx->host_msg);
	mbx->host_msg = NULL;
}
/* Create and initialize the mailbox subsystem of @hwdev: software state,
 * VF self-channel (a VF only talks to its PF, hence func_num = 1),
 * chip-side attributes, and host-to-host channels. On failure all steps
 * are unwound in reverse order and hwdev->mbx is reset to NULL.
 *
 * Return: 0 on success, negative errno on failure.
 */
int sss_hwif_init_mbx(struct sss_hwdev *hwdev)
{
	int ret;
	struct sss_mbx *mbx;

	mbx = kzalloc(sizeof(*mbx), GFP_KERNEL);
	if (!mbx)
		return -ENOMEM;

	hwdev->mbx = mbx;
	mbx->hwdev = hwdev;

	ret = sss_init_mbx_info(mbx);
	if (ret != 0)
		goto init_mbx_info_err;

	if (SSS_IS_VF(hwdev)) {
		ret = sss_init_func_mbx_msg(hwdev, 1);
		if (ret != 0)
			goto init_func_mbx_msg_err;
	}

	sss_chip_set_mbx_attr(mbx);
	sss_prepare_send_mbx(mbx);

	ret = sss_alloc_host_msg(hwdev);
	if (ret != 0) {
		sdk_err(hwdev->dev_hdl, "Fail to alloc host msg\n");
		goto alloc_host_msg_err;
	}

	return 0;

alloc_host_msg_err:
	sss_chip_reset_mbx_attr(mbx);
	sss_deinit_func_mbx_msg(mbx);
init_func_mbx_msg_err:
	sss_deinit_mbx_info(mbx);
init_mbx_info_err:
	kfree(mbx);
	hwdev->mbx = NULL;

	return ret;
}
/* Tear down the mailbox subsystem. The workqueue is destroyed first so no
 * deferred handler can run during teardown; setting workq to NULL makes
 * sss_deinit_mbx_info() skip its own destroy_workqueue() call.
 */
void sss_hwif_deinit_mbx(struct sss_hwdev *hwdev)
{
	struct sss_mbx *mbx = hwdev->mbx;

	destroy_workqueue(mbx->workq);
	mbx->workq = NULL;

	sss_chip_reset_mbx_attr(mbx);

	sss_free_host_msg(mbx);

	sss_deinit_func_mbx_msg(mbx);

	sss_deinit_mbx_info(mbx);

	kfree(mbx);
	hwdev->mbx = NULL;
}
/* Validate a received mailbox segment header against limits and against
 * the reassembly state stored in @msg_desc.
 *
 * Segment 0 starts a new message and is accepted after the size checks;
 * any later segment must continue the in-progress message: consecutive
 * seq_id and matching msg_id/mod/cmd.
 *
 * Return: true when the segment may be merged into @msg_desc.
 */
static bool sss_check_mbx_msg_header(void *dev_hdl,
				     struct sss_msg_desc *msg_desc, u64 mbx_header)
{
	u8 seq_id = SSS_GET_MSG_HEADER(mbx_header, SEQID);
	u8 seg_len = SSS_GET_MSG_HEADER(mbx_header, SEG_LEN);
	u8 msg_id = SSS_GET_MSG_HEADER(mbx_header, MSG_ID);
	u8 mod = SSS_GET_MSG_HEADER(mbx_header, MODULE);
	u16 cmd = SSS_GET_MSG_HEADER(mbx_header, CMD);

	if (seq_id > SSS_MAX_SEG_ID) {
		sdk_err(dev_hdl, "Current seg info: seq_id = 0x%x\n", seq_id);
		return false;
	}

	if (seg_len > SSS_MBX_SEG_SIZE) {
		sdk_err(dev_hdl, "Current seg info: seg_len = 0x%x\n", seg_len);
		return false;
	}

	/* the final segment has a tighter size limit */
	if (seq_id == SSS_MAX_SEG_ID && seg_len > SSS_MBX_LAST_SEG_MAX_SIZE) {
		sdk_err(dev_hdl, "Current seg info: seq_id = 0x%x, seg_len = 0x%x\n",
			seq_id, seg_len);
		return false;
	}

	if (seq_id == 0)
		return true;

	if (seq_id != msg_desc->seq_id + 1) {
		sdk_err(dev_hdl, "Current seg info: seq_id = 0x%x, 0x%x\n",
			seq_id, msg_desc->seq_id);
		return false;
	}

	if (msg_id != msg_desc->msg_info.msg_id) {
		sdk_err(dev_hdl, "Current seg info: msg_id = 0x%x, 0x%x\n",
			msg_id, msg_desc->msg_info.msg_id);
		return false;
	}

	if (mod != msg_desc->mod) {
		sdk_err(dev_hdl, "Current seg info: mod = 0x%x, 0x%x\n",
			mod, msg_desc->mod);
		return false;
	}

	if (cmd != msg_desc->cmd) {
		sdk_err(dev_hdl, "Current seg info: cmd = 0x%x, 0x%x\n",
			cmd, msg_desc->cmd);
		return false;
	}

	return true;
}
/* Merge one validated segment into the reassembly descriptor: copy its
 * payload at the segment's offset and update the descriptor metadata.
 * Identity fields (msg_id/mod/cmd) are latched only on segment 0.
 */
static void sss_fill_msg_desc(struct sss_msg_desc *msg_desc, u64 *msg_header)
{
	u64 mbx_header = *msg_header;
	u8 seq_id = SSS_GET_MSG_HEADER(mbx_header, SEQID);
	u8 seg_len = SSS_GET_MSG_HEADER(mbx_header, SEG_LEN);
	u8 msg_id = SSS_GET_MSG_HEADER(mbx_header, MSG_ID);
	u8 mod = SSS_GET_MSG_HEADER(mbx_header, MODULE);
	u16 cmd = SSS_GET_MSG_HEADER(mbx_header, CMD);
	u32 offset = seq_id * SSS_MBX_SEG_SIZE;
	void *msg_body = SSS_GET_MBX_BODY(((void *)msg_header));

	msg_desc->seq_id = seq_id;
	if (seq_id == 0) {
		msg_desc->msg_info.msg_id = msg_id;
		msg_desc->mod = mod;
		msg_desc->cmd = cmd;
	}
	msg_desc->msg_len = SSS_GET_MSG_HEADER(mbx_header, MSG_LEN);
	msg_desc->msg_info.state = SSS_GET_MSG_HEADER(mbx_header, STATUS);

	memcpy((u8 *)msg_desc->msg + offset, msg_body, seg_len);
}
/* Allocate a receive-mailbox descriptor with its request and response
 * payload buffers.
 *
 * Return: the descriptor, or NULL on allocation failure (nothing leaked).
 */
static struct sss_recv_mbx *sss_alloc_recv_mbx(void)
{
	struct sss_recv_mbx *recv_mbx = NULL;

	recv_mbx = kzalloc(sizeof(*recv_mbx), GFP_KERNEL);
	if (!recv_mbx)
		return NULL;

	recv_mbx->buf = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL);
	if (!recv_mbx->buf)
		goto alloc_recv_mbx_buf_err;

	recv_mbx->resp_buf = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL);
	if (!recv_mbx->resp_buf)
		goto alloc_recv_mbx_resp_buf_err;

	return recv_mbx;

alloc_recv_mbx_resp_buf_err:
	kfree(recv_mbx->buf);

alloc_recv_mbx_buf_err:
	kfree(recv_mbx);

	return NULL;
}
/* Free a receive-mailbox descriptor and both of its payload buffers. */
static void sss_free_recv_mbx(struct sss_recv_mbx *recv_mbx)
{
	kfree(recv_mbx->resp_buf);
	kfree(recv_mbx->buf);
	kfree(recv_mbx);
}
/* Dispatch a mailbox message received by a VF to the callback registered
 * for its module. The RUN bit brackets the callback so an unregister can
 * detect an in-flight handler.
 *
 * Return: the callback's result, or -EINVAL for a bad module or missing
 * callback.
 */
static int sss_recv_vf_mbx_handler(struct sss_mbx *mbx,
				   struct sss_recv_mbx *recv_mbx, void *resp_buf, u16 *size)
{
	int ret;
	sss_vf_mbx_handler_t callback;
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);

	if (recv_mbx->mod >= SSS_MOD_TYPE_MAX) {
		sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %hhu\n", recv_mbx->mod);
		return -EINVAL;
	}

	set_bit(SSS_VF_RECV_HANDLER_RUN, &mbx->vf_mbx_cb_state[recv_mbx->mod]);

	callback = mbx->vf_mbx_cb[recv_mbx->mod];
	if (callback &&
	    test_bit(SSS_VF_RECV_HANDLER_REG, &mbx->vf_mbx_cb_state[recv_mbx->mod])) {
		ret = callback(mbx->vf_mbx_data[recv_mbx->mod], recv_mbx->cmd, recv_mbx->buf,
			       recv_mbx->buf_len, resp_buf, size);
	} else {
		sdk_warn(hwdev->dev_hdl, "VF mbx cb is unregistered\n");
		ret = -EINVAL;
	}

	clear_bit(SSS_VF_RECV_HANDLER_RUN, &mbx->vf_mbx_cb_state[recv_mbx->mod]);

	return ret;
}
/* Dispatch a mailbox message a PF received from the PPF to the registered
 * per-module callback (same RUN/REG bracketing as the VF path).
 *
 * Return: the callback's result, or -EINVAL for a bad module or missing
 * callback.
 */
static int sss_recv_pf_from_ppf_handler(struct sss_mbx *mbx,
					struct sss_recv_mbx *recv_mbx, void *resp_buf, u16 *size)
{
	int ret;
	sss_pf_from_ppf_mbx_handler_t callback;
	enum sss_mod_type mod = recv_mbx->mod;
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);

	if (mod >= SSS_MOD_TYPE_MAX) {
		sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %d\n", mod);
		return -EINVAL;
	}

	set_bit(SSS_PPF_TO_PF_RECV_HANDLER_RUN, &mbx->ppf_to_pf_mbx_cb_state[mod]);

	callback = mbx->pf_recv_ppf_mbx_cb[mod];
	if (callback &&
	    test_bit(SSS_PPF_TO_PF_RECV_HANDLER_REG, &mbx->ppf_to_pf_mbx_cb_state[mod]) != 0) {
		ret = callback(mbx->pf_recv_ppf_mbx_data[mod], recv_mbx->cmd,
			       recv_mbx->buf, recv_mbx->buf_len, resp_buf, size);
	} else {
		sdk_warn(hwdev->dev_hdl, "PF recv ppf mbx cb is not registered\n");
		ret = -EINVAL;
	}

	clear_bit(SSS_PPF_TO_PF_RECV_HANDLER_RUN, &mbx->ppf_to_pf_mbx_cb_state[mod]);

	return ret;
}
/* Dispatch a mailbox message received by the PPF to the registered
 * per-module callback. vf_id is fixed at 0 here; the sender is identified
 * by @pf_id.
 *
 * Return: the callback's result, or -EINVAL for a bad module or missing
 * callback.
 */
static int sss_recv_ppf_mbx_handler(struct sss_mbx *mbx,
				    struct sss_recv_mbx *recv_mbx, u8 pf_id,
				    void *resp_buf, u16 *size)
{
	int ret;
	u16 vf_id = 0;
	sss_ppf_mbx_handler_t callback;
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);

	if (recv_mbx->mod >= SSS_MOD_TYPE_MAX) {
		sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %hhu\n", recv_mbx->mod);
		return -EINVAL;
	}

	set_bit(SSS_PPF_RECV_HANDLER_RUN, &mbx->ppf_mbx_cb_state[recv_mbx->mod]);

	callback = mbx->ppf_mbx_cb[recv_mbx->mod];
	if (callback &&
	    test_bit(SSS_PPF_RECV_HANDLER_REG, &mbx->ppf_mbx_cb_state[recv_mbx->mod])) {
		ret = callback(mbx->ppf_mbx_data[recv_mbx->mod], pf_id, vf_id, recv_mbx->cmd,
			       recv_mbx->buf, recv_mbx->buf_len, resp_buf, size);
	} else {
		sdk_warn(hwdev->dev_hdl, "PPF mbx cb is unregistered, mod = %hhu\n", recv_mbx->mod);
		ret = -EINVAL;
	}

	clear_bit(SSS_PPF_RECV_HANDLER_RUN, &mbx->ppf_mbx_cb_state[recv_mbx->mod]);

	return ret;
}
/* Dispatch a mailbox message a PF received from one of its VFs. The VF's
 * relative id is derived from the global function id minus the PF's
 * global VF offset.
 *
 * Return: the callback's result, or -EINVAL for a bad module or missing
 * callback.
 */
static int sss_recv_pf_from_vf_mbx_handler(struct sss_mbx *mbx,
					   struct sss_recv_mbx *recv_mbx,
					   u16 src_func_id, void *resp_buf,
					   u16 *size)
{
	int ret;
	u16 vf_id = 0;
	sss_pf_mbx_handler_t callback;
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);

	if (recv_mbx->mod >= SSS_MOD_TYPE_MAX) {
		sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %hhu\n", recv_mbx->mod);
		return -EINVAL;
	}

	set_bit(SSS_PF_RECV_HANDLER_RUN, &mbx->pf_mbx_cb_state[recv_mbx->mod]);

	callback = mbx->pf_mbx_cb[recv_mbx->mod];
	if (callback &&
	    test_bit(SSS_PF_RECV_HANDLER_REG, &mbx->pf_mbx_cb_state[recv_mbx->mod]) != 0) {
		/* convert global function id to the PF-relative VF index */
		vf_id = src_func_id - sss_get_glb_pf_vf_offset(SSS_TO_HWDEV(mbx));
		ret = callback(mbx->pf_mbx_data[recv_mbx->mod], vf_id, recv_mbx->cmd,
			       recv_mbx->buf, recv_mbx->buf_len, resp_buf, size);
	} else {
		sdk_warn(hwdev->dev_hdl, "PF mbx mod(0x%x) cb is unregistered\n", recv_mbx->mod);
		ret = -EINVAL;
	}

	clear_bit(SSS_PF_RECV_HANDLER_RUN, &mbx->pf_mbx_cb_state[recv_mbx->mod]);

	return ret;
}
/* Send the response for a handled mailbox request back to its sender.
 * A handler error is reported via msg_info.state and the payload is
 * collapsed to the minimal size; oversized payloads are truncated to
 * SSS_MBX_DATA_SIZE with a log message.
 */
static void sss_send_mbx_response(struct sss_mbx *mbx,
				  struct sss_recv_mbx *recv_mbx, int ret, u16 size, u16 src_func_id)
{
	u16 data_size;
	struct sss_mbx_msg_info msg_info = {0};
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);

	msg_info.msg_id = recv_mbx->msg_id;
	if (ret != 0)
		msg_info.state = SSS_MBX_PF_SEND_ERR;

	data_size = (size == 0 || ret != 0) ? SSS_MBX_MSG_NO_DATA_SIZE : size;
	if (data_size > SSS_MBX_DATA_SIZE) {
		sdk_err(hwdev->dev_hdl, "Resp msg len(%d), out of range: %d\n",
			data_size, SSS_MBX_DATA_SIZE);
		data_size = SSS_MBX_DATA_SIZE;
	}

	sss_send_mbx_msg(mbx, recv_mbx->mod, recv_mbx->cmd, recv_mbx->resp_buf, data_size,
			 src_func_id, SSS_RESP_MSG, SSS_MSG_NO_ACK, &msg_info);
}
/* Route a received mailbox request to the correct role handler
 * (VF, PPF, PF-from-PPF, or PF-from-VF) and, when the sender asked for
 * an acknowledgement, send the response back.
 */
static void sss_recv_mbx_handler(struct sss_mbx *mbx,
				 struct sss_recv_mbx *recv_mbx)
{
	int ret = 0;
	void *resp_buf = recv_mbx->resp_buf;
	u16 size = SSS_MBX_DATA_SIZE;
	u16 src_func_id = recv_mbx->src_func_id;
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);

	if (SSS_IS_VF(hwdev)) {
		ret = sss_recv_vf_mbx_handler(mbx, recv_mbx, resp_buf, &size);
		goto out;
	}

	if (SSS_SRC_IS_PF_OR_PPF(hwdev, src_func_id)) {
		if (SSS_IS_PPF(hwdev))
			ret = sss_recv_ppf_mbx_handler(mbx, recv_mbx,
						       (u8)src_func_id,
						       resp_buf, &size);
		else
			ret = sss_recv_pf_from_ppf_handler(mbx, recv_mbx, resp_buf, &size);
	} else {
		ret = sss_recv_pf_from_vf_mbx_handler(mbx,
						      recv_mbx, src_func_id,
						      resp_buf, &size);
	}

out:
	if (recv_mbx->ack_type == SSS_MSG_ACK)
		sss_send_mbx_response(mbx, recv_mbx, ret, size, src_func_id);
}
/* Workqueue entry: handle one deferred mailbox message, then release the
 * per-channel in-flight counter, the work item and the message descriptor.
 */
static void sss_recv_mbx_work_handler(struct work_struct *work)
{
	struct sss_mbx_work *mbx_work = container_of(work, struct sss_mbx_work, work);

	sss_recv_mbx_handler(mbx_work->mbx, mbx_work->recv_mbx);

	atomic_dec(&mbx_work->msg_buffer->recv_msg_cnt);
	destroy_work(&mbx_work->work);
	sss_free_recv_mbx(mbx_work->recv_mbx);
	kfree(mbx_work);
}
/* Snapshot a fully reassembled message from its descriptor into a
 * standalone receive-mailbox object so the descriptor can be reused.
 */
static void sss_init_recv_mbx_param(struct sss_recv_mbx *recv_mbx,
				    struct sss_msg_desc *msg_desc, u64 msg_header)
{
	recv_mbx->msg_id = msg_desc->msg_info.msg_id;
	recv_mbx->mod = SSS_GET_MSG_HEADER(msg_header, MODULE);
	recv_mbx->cmd = SSS_GET_MSG_HEADER(msg_header, CMD);
	recv_mbx->ack_type = SSS_GET_MSG_HEADER(msg_header, NO_ACK);
	recv_mbx->src_func_id = SSS_GET_MSG_HEADER(msg_header, SRC_GLB_FUNC_ID);
	recv_mbx->buf_len = msg_desc->msg_len;

	memcpy(recv_mbx->buf, msg_desc->msg, msg_desc->msg_len);
}
/* Queue @recv_mbx for deferred handling on the mailbox workqueue. The
 * per-channel in-flight counter is bumped here and dropped by the work
 * handler.
 *
 * Return: 0 on success, -ENOMEM when the work item cannot be allocated
 * (the caller then frees @recv_mbx).
 */
static int sss_init_mbx_work(struct sss_mbx *mbx, struct sss_recv_mbx *recv_mbx,
			     struct sss_msg_buffer *msg_buffer)
{
	struct sss_mbx_work *mbx_work = NULL;

	mbx_work = kzalloc(sizeof(*mbx_work), GFP_KERNEL);
	if (!mbx_work)
		return -ENOMEM;

	atomic_inc(&msg_buffer->recv_msg_cnt);
	mbx_work->msg_buffer = msg_buffer;
	mbx_work->recv_mbx = recv_mbx;
	mbx_work->mbx = mbx;

	INIT_WORK(&mbx_work->work, sss_recv_mbx_work_handler);
	queue_work_on(WORK_CPU_UNBOUND, mbx->workq, &mbx_work->work);

	return 0;
}
/* Handle a completed direct-send message: drop it when the source function
 * already has too many unprocessed messages (back-pressure), otherwise copy
 * it out and queue it for deferred processing.
 */
static void sss_recv_mbx_msg_handler(struct sss_mbx *mbx,
				     struct sss_msg_desc *msg_desc, u64 msg_header)
{
	u32 msg_cnt;
	int ret;
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx);
	struct sss_recv_mbx *recv_mbx = NULL;
	struct sss_msg_buffer *msg_buffer = container_of(msg_desc, struct sss_msg_buffer, recv_msg);

	msg_cnt = atomic_read(&msg_buffer->recv_msg_cnt);
	if (msg_cnt > SSS_MSG_PROCESS_CNT_MAX) {
		u64 src_func_id = SSS_GET_MSG_HEADER(msg_header, SRC_GLB_FUNC_ID);

		sdk_warn(hwdev->dev_hdl, "This func(%llu) have %u msg wait to process\n",
			 src_func_id, msg_cnt);
		return;
	}

	recv_mbx = sss_alloc_recv_mbx();
	if (!recv_mbx) {
		sdk_err(hwdev->dev_hdl, "Fail to alloc receive recv_mbx message buffer\n");
		return;
	}

	sss_init_recv_mbx_param(recv_mbx, msg_desc, msg_header);

	ret = sss_init_mbx_work(mbx, recv_mbx, msg_buffer);
	if (ret != 0)
		sss_free_recv_mbx(recv_mbx);
}
/* Handle a completed response message: if it matches the currently pending
 * send, mark the send successful; otherwise it is a late/stale response
 * and is only logged.
 */
static void sss_resp_mbx_handler(struct sss_mbx *mbx,
				 const struct sss_msg_desc *msg_desc)
{
	spin_lock(&mbx->mbx_lock);
	if (msg_desc->msg_info.msg_id == mbx->send_msg_id &&
	    mbx->event_flag == SSS_EVENT_START)
		mbx->event_flag = SSS_EVENT_SUCCESS;
	else
		sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl,
			"Mbx resp timeout, current send msg_id(0x%x), recv msg_id(0x%x), status(0x%x)\n",
			mbx->send_msg_id, msg_desc->msg_info.msg_id, msg_desc->msg_info.state);
	spin_unlock(&mbx->mbx_lock);
}
/* Process one mailbox segment delivered through the AEQ: validate it,
 * merge it into the reassembly descriptor, and on the LAST segment hand
 * the complete message to the direct-send or response path. A bad header
 * resets the descriptor so reassembly restarts at segment 0.
 */
static void sss_recv_mbx_aeq(struct sss_mbx *mbx, u64 *msg_header,
			     struct sss_msg_desc *msg_desc)
{
	u64 header = *msg_header;

	if (!sss_check_mbx_msg_header(SSS_TO_HWDEV(mbx)->dev_hdl, msg_desc, header)) {
		msg_desc->seq_id = SSS_MAX_SEG_ID;
		return;
	}

	sss_fill_msg_desc(msg_desc, msg_header);

	if (!SSS_GET_MSG_HEADER(header, LAST))
		return;

	if (SSS_GET_MSG_HEADER(header, DIRECTION) == SSS_DIRECT_SEND_MSG) {
		sss_recv_mbx_msg_handler(mbx, msg_desc, header);
		return;
	}

	sss_resp_mbx_handler(mbx, msg_desc);
}
/* AEQ entry point for mailbox events: look up the reassembly descriptor
 * of the sending function/direction and feed the segment into it.
 * @size is part of the aeq callback signature and is unused here.
 */
void sss_recv_mbx_aeq_handler(void *handle, u8 *header, u8 size)
{
	u64 msg_header = *((u64 *)header);
	u64 src_func_id = SSS_GET_MSG_HEADER(msg_header, SRC_GLB_FUNC_ID);
	u64 direction = SSS_GET_MSG_HEADER(msg_header, DIRECTION);
	struct sss_msg_desc *msg_desc = NULL;
	struct sss_hwdev *hwdev = (struct sss_hwdev *)handle;
	struct sss_mbx *mbx = hwdev->mbx;

	msg_desc = sss_get_mbx_msg_desc(mbx, src_func_id, direction);
	if (!msg_desc) {
		sdk_err(hwdev->dev_hdl, "Invalid mbx src_func_id: %u\n", (u32)src_func_id);
		return;
	}

	sss_recv_mbx_aeq(mbx, (u64 *)header, msg_desc);
}

View File

@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

/* Public interface of the mailbox init/teardown code (sss_hwif_mbx_init.c). */

#ifndef SSS_HWIF_MBX_INIT_H
#define SSS_HWIF_MBX_INIT_H

#include "sss_hwdev.h"

/* Allocate per-function mailbox message channels; see the .c for errors. */
int sss_init_func_mbx_msg(void *hwdev, u16 func_num);
/* Create/destroy the whole mailbox subsystem of a hw device. */
int sss_hwif_init_mbx(struct sss_hwdev *hwdev);
void sss_hwif_deinit_mbx(struct sss_hwdev *hwdev);
/* AEQ callback that receives mailbox segments. */
void sss_recv_mbx_aeq_handler(void *handle, u8 *header, u8 size);

#endif

View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

/* Shared size/segmentation constants for PF <-> mgmt-cpu messaging. */

#ifndef SSS_HWIF_MGMT_COMMON_H
#define SSS_HWIF_MGMT_COMMON_H

/* msg_id bit used to mark asynchronous management messages */
#define SSS_ASYNC_MSG_FLAG 0x8

#define SSS_PF_MGMT_BUF_LEN_MAX 2048UL

#define SSS_MSG_TO_MGMT_LEN_MAX 2016

/* payload bytes carried per segment */
#define SSS_SEG_LEN 48

/* highest segment sequence id of a maximum-length message */
#define SSS_MGMT_SEQ_ID_MAX \
	(ALIGN(SSS_MSG_TO_MGMT_LEN_MAX, SSS_SEG_LEN) / SSS_SEG_LEN)

/* max payload allowed in the final segment so the buffer cannot overflow */
#define SSS_MGMT_LAST_SEG_LEN_MAX \
	(SSS_PF_MGMT_BUF_LEN_MAX - SSS_SEG_LEN * SSS_MGMT_SEQ_ID_MAX)

#endif

View File

@ -0,0 +1,298 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_hwif_mbx.h"
#include "sss_hwif_mbx_init.h"
#include "sss_hwif_mgmt_common.h"
#include "sss_hwif_ctrlq_init.h"
#include "sss_hwif_adm_init.h"
#define SSS_DEF_OUT_SIZE 1
/* Deferred-work wrapper for one received management message: carries a
 * private copy of the payload plus the header fields needed to dispatch
 * it and to send the acknowledgement.
 */
struct sss_mgmt_msg_handle_work {
	struct work_struct work;
	struct sss_msg_pf_to_mgmt *pf_to_mgmt;

	void *msg;	/* owned copy of the payload, freed by the work handler */
	u16 msg_len;

	u8 no_ack;	/* nonzero: sender does not want a response */
	u8 resvd;

	enum sss_mod_type mod;
	u16 cmd;
	u16 msg_id;
};
/* Send a response for a mgmt-cpu request back over the mailbox, addressed
 * to the management cpu (SSS_MGMT_SRC_ID), without requesting an ack.
 */
static void sss_send_response_mbx_to_mgmt(struct sss_hwdev *hwdev, u8 mod, u16 cmd,
					  void *buf_in, u16 in_size, u16 msg_id)
{
	struct sss_mbx_msg_info info;

	info.msg_id = (u8)msg_id;
	info.state = 0;

	sss_send_mbx_msg(hwdev->mbx, mod, cmd, buf_in, in_size,
			 SSS_MGMT_SRC_ID, SSS_RESP_MSG, SSS_MSG_NO_ACK, &info);
}
/* Dispatch a message from the management cpu to the callback registered
 * for its module. Unknown modules and unregistered callbacks are answered
 * with an SSS_MGMT_CMD_UNSUPPORTED status header. When @resp_need is set
 * a response (possibly the minimal SSS_DEF_OUT_SIZE one) is always sent.
 */
static void sss_mgmt_recv_msg_handler(struct sss_msg_pf_to_mgmt *mgmt_msg,
				      u8 mod, u16 cmd, void *in_buf,
				      u16 in_size, u16 msg_id, int resp_need)
{
	u16 size;
	u16 out_size = 0;
	void *dev_hdl = SSS_TO_HWDEV(mgmt_msg)->dev_hdl;
	void *out_buf = mgmt_msg->ack_buf;

	memset(out_buf, 0, SSS_PF_MGMT_BUF_LEN_MAX);

	if (mod >= SSS_MOD_TYPE_HW_MAX) {
		sdk_warn(dev_hdl, "Recv illegal msg from mgmt cpu, mod = %d\n", mod);
		out_size = sizeof(struct sss_mgmt_msg_head);
		((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED;
		goto out;
	}

	/* RUN bit brackets the callback so unregister can detect it in flight */
	set_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod]);

	if (!mgmt_msg->recv_handler[mod] ||
	    !test_bit(SSS_CALLBACK_REG, &mgmt_msg->recv_handler_state[mod])) {
		sdk_warn(dev_hdl, "Recv mgmt cb is null, mod = %d\n", mod);
		clear_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod]);
		out_size = sizeof(struct sss_mgmt_msg_head);
		((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED;
		goto out;
	}

	mgmt_msg->recv_handler[mod](mgmt_msg->recv_data[mod],
				    cmd, in_buf, in_size, out_buf, &out_size);

	clear_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod]);

out:
	if (resp_need != 0) {
		size = (out_size == 0) ? SSS_DEF_OUT_SIZE : out_size;
		sss_send_response_mbx_to_mgmt(SSS_TO_HWDEV(mgmt_msg), mod, cmd,
					      out_buf, size, msg_id);
	}
}
/* Workqueue entry: dispatch one deferred mgmt message (responding unless
 * no_ack was set), then release the work item and its payload copy.
 */
static void sss_recv_mgmt_msg_work_handler(struct work_struct *work)
{
	struct sss_mgmt_msg_handle_work *msg_work =
		container_of(work, struct sss_mgmt_msg_handle_work, work);

	sss_mgmt_recv_msg_handler(msg_work->pf_to_mgmt, msg_work->mod,
				  msg_work->cmd, msg_work->msg, msg_work->msg_len, msg_work->msg_id,
				  !msg_work->no_ack);

	destroy_work(&msg_work->work);

	kfree(msg_work->msg);
	kfree(msg_work);
}
/* Latch the header fields of a completed mgmt message into the receive
 * descriptor and reset seq_id so the next reassembly starts fresh.
 */
static void sss_init_mgmt_recv_msg(struct sss_recv_msg *msg_recv, u64 msg_header)
{
	msg_recv->cmd = SSS_GET_MSG_HEADER(msg_header, CMD);
	msg_recv->mod = SSS_GET_MSG_HEADER(msg_header, MODULE);
	msg_recv->no_ack = SSS_GET_MSG_HEADER(msg_header, NO_ACK);
	msg_recv->buf_len = SSS_GET_MSG_HEADER(msg_header, MSG_LEN);
	msg_recv->msg_id = SSS_GET_MSG_HEADER(msg_header, MSG_ID);
	msg_recv->seq_id = SSS_MGMT_SEQ_ID_MAX;
}
/* Validate one mgmt segment header against size limits and reassembly
 * state. Segment 0 (re)starts a message and latches msg_id; later segments
 * must be consecutive and belong to the same msg_id.
 *
 * Return: true when the segment may be merged into @msg_recv.
 */
static bool sss_check_mgmt_head_info(struct sss_recv_msg *msg_recv, u64 header)
{
	u8 seg_len = SSS_GET_MSG_HEADER(header, SEG_LEN);
	u8 seg_id = SSS_GET_MSG_HEADER(header, SEQID);
	u16 msg_id = SSS_GET_MSG_HEADER(header, MSG_ID);

	if (seg_id > SSS_MGMT_SEQ_ID_MAX || seg_len > SSS_SEG_LEN ||
	    (seg_id == SSS_MGMT_SEQ_ID_MAX && seg_len > SSS_MGMT_LAST_SEG_LEN_MAX))
		return false;

	if (seg_id == 0) {
		msg_recv->msg_id = msg_id;
		msg_recv->seq_id = seg_id;

		return true;
	}

	if (seg_id != (msg_recv->seq_id + 1) || msg_id != msg_recv->msg_id)
		return false;

	msg_recv->seq_id = seg_id;

	return true;
}
/* Complete the waiter of a synchronous mgmt send when its response
 * arrives. Async responses (SSS_ASYNC_MSG_FLAG set in the msg_id) are
 * ignored; mismatched or late responses are only logged.
 */
static void sss_mgmt_resp_msg_handler(struct sss_msg_pf_to_mgmt *mgmt_msg,
				      struct sss_recv_msg *msg_recv)
{
	void *dev_hdl = SSS_TO_HWDEV(mgmt_msg)->dev_hdl;

	if ((msg_recv->msg_id & SSS_ASYNC_MSG_FLAG) != 0)
		return;

	spin_lock(&mgmt_msg->sync_event_lock);
	if (msg_recv->msg_id == mgmt_msg->sync_msg_id &&
	    mgmt_msg->event_state == SSS_ADM_EVENT_START) {
		mgmt_msg->event_state = SSS_ADM_EVENT_SUCCESS;
		complete(&msg_recv->done);
		spin_unlock(&mgmt_msg->sync_event_lock);
		return;
	}

	sdk_err(dev_hdl, "Send msg id(0x%x) recv msg id(0x%x) dismatch, event state=%d\n",
		mgmt_msg->sync_msg_id, msg_recv->msg_id, mgmt_msg->event_state);
	sdk_err(dev_hdl, "Wait timeout, send and recv msg id(0x%x)(0x%x), event state=%d\n",
		mgmt_msg->sync_msg_id, msg_recv->msg_id, mgmt_msg->event_state);
	spin_unlock(&mgmt_msg->sync_event_lock);
}
/* Copy a fully reassembled management message and queue it on the mgmt
 * workqueue for deferred processing. Allocation failures drop the message
 * silently (best effort), matching the caller's expectations.
 */
static void sss_init_mgmt_msg_work(struct sss_msg_pf_to_mgmt *mgmt_msg,
				   struct sss_recv_msg *msg_recv)
{
	struct sss_mgmt_msg_handle_work *msg_work = NULL;

	msg_work = kzalloc(sizeof(*msg_work), GFP_KERNEL);
	if (!msg_work)
		return;

	if (msg_recv->buf_len != 0) {
		msg_work->msg = kzalloc(msg_recv->buf_len, GFP_KERNEL);
		if (!msg_work->msg) {
			kfree(msg_work);
			return;
		}

		/* Copy only when a payload exists: the original code did an
		 * unconditional memcpy() into msg_work->msg, which is NULL
		 * when buf_len == 0 — undefined behavior.
		 */
		memcpy(msg_work->msg, msg_recv->buf, msg_recv->buf_len);
	}

	msg_work->pf_to_mgmt = mgmt_msg;
	msg_work->msg_len = msg_recv->buf_len;
	msg_work->msg_id = msg_recv->msg_id;
	msg_work->mod = msg_recv->mod;
	msg_work->cmd = msg_recv->cmd;
	msg_work->no_ack = msg_recv->no_ack;

	INIT_WORK(&msg_work->work, sss_recv_mgmt_msg_work_handler);
	queue_work_on(WORK_CPU_UNBOUND, mgmt_msg->workq, &msg_work->work);
}
/* Process one mgmt segment delivered through the AEQ: filter async
 * responses, validate the header, merge the payload at its segment offset,
 * and on the LAST segment route the complete message to the response
 * waiter or to deferred request handling.
 */
static void sss_recv_mgmt_msg_handler(struct sss_msg_pf_to_mgmt *mgmt_msg,
				      u8 *msg_header, struct sss_recv_msg *msg_recv)
{
	u8 seq_id;
	u8 seq_len;
	u16 msg_id;
	u32 msg_offset;
	u64 dir;
	u64 header = *((u64 *)msg_header);
	void *msg_body;
	struct sss_hwdev *hwdev = SSS_TO_HWDEV(mgmt_msg);

	dir = SSS_GET_MSG_HEADER(header, DIRECTION);
	msg_id = SSS_GET_MSG_HEADER(header, MSG_ID);
	/* async responses carry no waiter; nothing to do for them */
	if (dir == SSS_RESP_MSG && (msg_id & SSS_ASYNC_MSG_FLAG) != 0)
		return;

	if (!sss_check_mgmt_head_info(msg_recv, header)) {
		msg_recv->seq_id = SSS_MGMT_SEQ_ID_MAX;
		sdk_err(hwdev->dev_hdl, "Fail to check Mgmt msg seq id and seq len\n");
		return;
	}

	seq_len = SSS_GET_MSG_HEADER(header, SEG_LEN);
	seq_id = SSS_GET_MSG_HEADER(header, SEQID);
	msg_offset = seq_id * SSS_SEG_LEN;
	msg_body = msg_header + sizeof(header);
	memcpy((u8 *)msg_recv->buf + msg_offset, msg_body, seq_len);

	if (!SSS_GET_MSG_HEADER(header, LAST))
		return;

	sss_init_mgmt_recv_msg(msg_recv, header);

	if (SSS_GET_MSG_HEADER(header, DIRECTION) == SSS_RESP_MSG) {
		sss_mgmt_resp_msg_handler(mgmt_msg, msg_recv);
		return;
	}

	sss_init_mgmt_msg_work(mgmt_msg, msg_recv);
}
/* Force a pending mailbox send to time out (used when flushing all
 * in-flight events during teardown/reset).
 */
static void sss_set_mbx_event_timeout(struct sss_hwdev *hwdev)
{
	struct sss_mbx *mbx = hwdev->mbx;

	spin_lock(&mbx->mbx_lock);
	if (mbx->event_flag == SSS_EVENT_START)
		mbx->event_flag = SSS_EVENT_TIMEOUT;
	spin_unlock(&mbx->mbx_lock);
}
/* Top-level AEQ handler for management events: messages sourced from the
 * mailbox are forwarded to the mailbox path; the rest go to the PF<->mgmt
 * reassembly path, choosing the request or response descriptor by the
 * header's DIRECTION field.
 */
void sss_mgmt_msg_aeqe_handler(void *hwdev, u8 *msg_header, u8 size)
{
	bool msg_dir;
	struct sss_recv_msg *msg = NULL;
	struct sss_msg_pf_to_mgmt *mgmt_msg = NULL;
	struct sss_hwdev *dev = (struct sss_hwdev *)hwdev;

	if (SSS_GET_MSG_HEADER(*(u64 *)msg_header, SOURCE) == SSS_MSG_SRC_MBX) {
		sss_recv_mbx_aeq_handler(hwdev, msg_header, size);
		return;
	}

	mgmt_msg = dev->pf_to_mgmt;
	if (!mgmt_msg)
		return;

	msg_dir = SSS_GET_MSG_HEADER(*(u64 *)msg_header, DIRECTION) == SSS_DIRECT_SEND_MSG;

	msg = msg_dir ? &mgmt_msg->recv_msg : &mgmt_msg->recv_resp_msg;

	sss_recv_mgmt_msg_handler(mgmt_msg, msg_header, msg);
}
/* Abort every in-flight command channel of the device (adm events, mailbox
 * sends, ctrlq sync commands) so waiters wake up during teardown/reset.
 * Only channels whose init-OK bit is set are touched; adm events exist
 * only on non-VF functions.
 */
void sss_force_complete_all(void *dev)
{
	struct sss_hwdev *hwdev = dev;

	spin_lock_bh(&hwdev->channel_lock);

	if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF &&
	    test_bit(SSS_HW_ADM_INIT_OK, &hwdev->func_state))
		sss_complete_adm_event(hwdev);

	if (test_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state))
		sss_set_mbx_event_timeout(hwdev);

	if (test_bit(SSS_HW_CTRLQ_INIT_OK, &hwdev->func_state))
		sss_ctrlq_flush_sync_cmd(hwdev);

	spin_unlock_bh(&hwdev->channel_lock);
}
/* Drain the AEQ workqueue and (on PF/PPF) the mgmt workqueue so no
 * deferred handler is left running.
 */
void sss_flush_mgmt_workq(void *hwdev)
{
	struct sss_hwdev *dev = (struct sss_hwdev *)hwdev;

	flush_workqueue(dev->aeq_info->workq);

	if (sss_get_func_type(dev) != SSS_FUNC_TYPE_VF)
		flush_workqueue(dev->pf_to_mgmt->workq);
}

View File

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

/* Public interface of the PF <-> mgmt-cpu message code. */

#ifndef SSS_HWIF_MGMT_INIT_H
#define SSS_HWIF_MGMT_INIT_H

#include "sss_hwdev.h"

/* AEQ callback for management message events. */
void sss_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size);
/* Abort all in-flight command channels (teardown/reset helper). */
void sss_force_complete_all(void *dev);
/* Drain the AEQ and mgmt workqueues. */
void sss_flush_mgmt_workq(void *hwdev);

#endif

View File

@ -0,0 +1,47 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <net/addrconf.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/aer.h>
#include <linux/debugfs.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_pci_id_tbl.h"
#include "sss_pci_sriov.h"
#include "sss_adapter_mgmt.h"
#include "sss_hwdev.h"
/* Bump the device's PCIe fault counter in the hardware statistics. */
static void sss_record_pcie_error(void *dev)
{
	struct sss_hwdev *hw = dev;

	atomic_inc(&hw->hw_stats.fault_event_stats.pcie_fault_stats);
}
/* PCI error-recovery callback (error_detected): clear uncorrectable AER
 * status, record the fault, and tell the core the device can recover.
 *
 * pci_cleanup_aer_uncorrect_error_status() is intentionally used here for
 * compatibility with older kernels (see the compat replacements in
 * scripts/release.sh) — do not modernize.
 */
pci_ers_result_t sss_detect_pci_error(struct pci_dev *pdev,
				      pci_channel_state_t state)
{
	struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev);

	sdk_err(&pdev->dev, "Pci error, state: 0x%08x\n", state);

	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (adapter)
		sss_record_pcie_error(adapter->hwdev);

	return PCI_ERS_RESULT_CAN_RECOVER;
}

View File

@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

/* PCI error-recovery callback declarations. */

#ifndef SSS_PCI_ERROR_H
#define SSS_PCI_ERROR_H

#include <linux/pci.h>

/* error_detected hook for struct pci_error_handlers. */
pci_ers_result_t sss_detect_pci_error(struct pci_dev *pdev,
				      pci_channel_state_t state);

#endif

View File

@ -0,0 +1,65 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <net/addrconf.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/aer.h>
#include <linux/debugfs.h>
#include "sss_kernel.h"
#include "sss_hw.h"
/* Module parameter: whether upper-layer drivers are attached on probe. */
static bool attach_uld = true;
module_param(attach_uld, bool, 0444);
MODULE_PARM_DESC(attach_uld, "enable attach upper driver - default is true");

/* Registered upper-layer drivers, indexed by service type. */
static struct sss_uld_info g_uld_info[SSS_SERVICE_TYPE_MAX];

/* Service-type names; order must match the service-type enum. */
static const char *g_uld_name[SSS_SERVICE_TYPE_MAX] = {
	"nic", "ovs", "roce", "toe", "ioe",
	"fc", "vbs", "ipsec", "virtio", "migrate", "ppa", "custom"
};

/* lock for attach/detach all uld and register/ unregister uld */
struct mutex g_uld_mutex;
/* One-time init of the global uld mutex (call before any lock/unlock). */
void sss_init_uld_lock(void)
{
	mutex_init(&g_uld_mutex);
}
/* Acquire the global uld mutex. */
void sss_lock_uld(void)
{
	mutex_lock(&g_uld_mutex);
}
/* Release the global uld mutex. */
void sss_unlock_uld(void)
{
	mutex_unlock(&g_uld_mutex);
}
/* Return the table of service-type names (indexed by service type). */
const char **sss_get_uld_names(void)
{
	return g_uld_name;
}
/* Return the registered-uld table; callers must hold the uld lock when
 * modifying it.
 */
struct sss_uld_info *sss_get_uld_info(void)
{
	return g_uld_info;
}
/* Return the attach_uld module parameter. */
bool sss_attach_is_enable(void)
{
	return attach_uld;
}

View File

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

/* Accessors for the global upper-layer-driver (uld) registry and lock. */

#ifndef SSS_PCI_GLOBAL_H
#define SSS_PCI_GLOBAL_H

#include <linux/types.h>

#include "sss_hw_uld_driver.h"

/* Registered-uld table (hold the uld lock while modifying). */
struct sss_uld_info *sss_get_uld_info(void);
/* attach_uld module parameter. */
bool sss_attach_is_enable(void);
/* Service-type name table. */
const char **sss_get_uld_names(void);
/* Global uld lock: init once, then lock/unlock around registry changes. */
void sss_init_uld_lock(void);
void sss_lock_uld(void);
void sss_unlock_uld(void);

#endif

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

/* PCI vendor/device ids supported by this driver. */

#ifndef SSS_PCI_ID_TBL_H
#define SSS_PCI_ID_TBL_H

#define PCI_VENDOR_ID_SSSNIC 0x1F3F

#define SSS_DEV_ID_STANDARD 0x9020
#define SSS_DEV_ID_SPN120 0x9021
#define SSS_DEV_ID_VF 0x9001
#define SSS_DEV_ID_VF_HV 0x9002
#define SSS_DEV_ID_SPU 0xAC00

#endif

View File

@ -0,0 +1,587 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <net/addrconf.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/aer.h>
#include <linux/debugfs.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_pci_id_tbl.h"
#include "sss_pci_sriov.h"
#include "sss_adapter_mgmt.h"
#include "sss_hwdev_init.h"
#include "sss_hwdev_api.h"
#include "sss_pci_remove.h"
#include "sss_pci_global.h"
#include "sss_tool.h"
/* struct tm years count from 1900, months from 0 */
#define SSS_SYNC_YEAR_OFFSET 1900
#define SSS_SYNC_MONTH_OFFSET 1

/* true for a comm-service fault event */
#define SSS_CHECK_EVENT_INFO(event) \
	((event)->service == SSS_EVENT_SRV_COMM && \
	 (event)->type == SSS_EVENT_FAULT)

/* true for a serious FLR-level fault on a valid PF */
#define SSS_CHECK_FAULT_EVENT_INFO(hwdev, fault_event) \
	((fault_event)->fault_level == SSS_FAULT_LEVEL_SERIOUS_FLR && \
	 (fault_event)->info.chip.func_id < sss_get_max_pf_num(hwdev))

/* config-register BAR differs between PF and VF devices */
#define SSS_GET_CFG_REG_BAR(pdev) (SSS_IS_VF_DEV(pdev) ? \
	SSS_VF_PCI_CFG_REG_BAR : SSS_PF_PCI_CFG_REG_BAR)
/* Decide whether a VF should be driven by this host. Root-bus devices and
 * VFs whose parent PF has no adapter (not probed by this driver) report
 * false.
 *
 * NOTE(review): the fetched adapter is only NULL-checked, never consulted
 * for an actual load-state flag — presumably intentional, but confirm
 * against upstream versions of this driver.
 */
static bool sss_get_vf_load_state(struct pci_dev *pdev)
{
	struct sss_pci_adapter *adapter = NULL;
	struct pci_dev *dev = NULL;

	if (pci_is_root_bus(pdev->bus))
		return false;

	/* for a VF, look at the owning physical function */
	dev = pdev->is_virtfn ? pdev->physfn : pdev;
	adapter = pci_get_drvdata(dev);
	if (!adapter) {
		sdk_err(&pdev->dev, "Invalid adapter, is null.\n");
		return false;
	}

	return true;
}
/* Bring up the PCI device: enable it, claim its regions, enable AER
 * reporting and bus mastering, and set the streaming/coherent DMA masks
 * (64-bit preferred, 32-bit fallback).
 *
 * The legacy pci_set_dma_mask()/pci_set_consistent_dma_mask() calls are
 * kept intentionally for older-kernel compatibility (see the compat
 * replacements in scripts/release.sh) — do not modernize to dma_set_mask().
 *
 * Return: 0 on success, negative errno after full unwinding on failure.
 */
static int sss_init_pci_dev(struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret != 0) {
		sdk_err(&pdev->dev, "Fail to enable pci device\n");
		goto enable_err;
	}

	ret = pci_request_regions(pdev, SSS_DRV_NAME);
	if (ret != 0) {
		sdk_err(&pdev->dev, "Fail to request regions\n");
		goto regions_err;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret != 0) {
		sdk_warn(&pdev->dev, "Fail to set 64-bit DMA mask\n");
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret != 0) {
			sdk_err(&pdev->dev, "Fail to set DMA mask\n");
			goto dma_err;
		}
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret != 0) {
		sdk_warn(&pdev->dev, "Fail to set 64-bit coherent DMA mask\n");
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret != 0) {
			sdk_err(&pdev->dev, "Fail to set coherent DMA mask\n");
			goto dma_err;
		}
	}

	return 0;

dma_err:
	pci_clear_master(pdev);
	pci_disable_pcie_error_reporting(pdev);
	pci_release_regions(pdev);

regions_err:
	pci_disable_device(pdev);

enable_err:
	pci_set_drvdata(pdev, NULL);

	return ret;
}
/* Update the adapter probe/init state under the uld attach mutex so that
 * attach/detach paths observe a consistent value.
 */
void sss_set_adapter_probe_state(struct sss_pci_adapter *adapter, int state)
{
	mutex_lock(&adapter->uld_attach_mutex);
	adapter->init_state = state;
	mutex_unlock(&adapter->uld_attach_mutex);
}
/* ioremap the device BARs into the adapter: doorbell, management (PF only),
 * interrupt and config register BARs. On any failure the BARs mapped so far
 * are unmapped in reverse order via the goto chain. Returns 0 or -ENOMEM.
 */
static int sss_map_pci_bar(struct pci_dev *pdev,
			   struct sss_pci_adapter *adapter)
{
	/* Doorbell BAR: keep physical address and length for DWQE users. */
	adapter->db_base_paddr = pci_resource_start(pdev, SSS_PCI_DB_BAR);
	adapter->db_dwqe_len = pci_resource_len(pdev, SSS_PCI_DB_BAR);
	adapter->db_reg_bar = pci_ioremap_bar(pdev, SSS_PCI_DB_BAR);
	if (!adapter->db_reg_bar) {
		sdk_err(&pdev->dev, "Fail to map db reg bar\n");
		return -ENOMEM;
	}

	/* Management register BAR exists only on PFs. */
	if (!SSS_IS_VF_DEV(pdev)) {
		adapter->mgmt_reg_bar = pci_ioremap_bar(pdev, SSS_PCI_MGMT_REG_BAR);
		if (!adapter->mgmt_reg_bar) {
			sdk_err(&pdev->dev, "Fail to map mgmt reg bar\n");
			goto mgmt_bar_err;
		}
	}

	adapter->intr_reg_bar = pci_ioremap_bar(pdev, SSS_PCI_INTR_REG_BAR);
	if (!adapter->intr_reg_bar) {
		sdk_err(&pdev->dev, "Fail to map intr reg bar\n");
		goto intr_bar_err;
	}

	/* Config BAR index differs between PF and VF. */
	adapter->cfg_reg_bar = pci_ioremap_bar(pdev, SSS_GET_CFG_REG_BAR(pdev));
	if (!adapter->cfg_reg_bar) {
		sdk_err(&pdev->dev, "Fail to map config reg bar\n");
		goto cfg_bar_err;
	}

	return 0;

cfg_bar_err:
	iounmap(adapter->intr_reg_bar);
intr_bar_err:
	if (!SSS_IS_VF_DEV(pdev))
		iounmap(adapter->mgmt_reg_bar);
mgmt_bar_err:
	iounmap(adapter->db_reg_bar);
	return -ENOMEM;
}
/* Deliver an event to every registered upper-layer driver (ULD) of this
 * adapter. The per-service uld_run_state bit acts as a busy flag: if a
 * service is already processing an event the new one is skipped with a
 * warning instead of re-entering the callback.
 */
static void sss_send_event_to_uld(struct sss_pci_adapter *adapter,
				  struct sss_event_info *event_info)
{
	enum sss_service_type type;
	const char **uld_name = sss_get_uld_names();
	struct sss_uld_info *uld_info = sss_get_uld_info();

	for (type = SSS_SERVICE_TYPE_NIC; type < SSS_SERVICE_TYPE_MAX; type++) {
		/* Busy flag already set: event handler re-entry, skip. */
		if (test_and_set_bit(type, &adapter->uld_run_state)) {
			sdk_warn(&adapter->pcidev->dev,
				 "Fail to send event, svc: 0x%x, event type: 0x%x, uld_name: %s\n",
				 event_info->service, event_info->type, uld_name[type]);
			continue;
		}

		if (uld_info[type].event)
			uld_info[type].event(&adapter->hal_dev,
					     adapter->uld_dev[type], event_info);
		clear_bit(type, &adapter->uld_run_state);
	}
}
/* Forward an event to the PF whose global function id equals @func_id.
 * Walks the function list of the chip this adapter belongs to; VFs and
 * functions in the middle of remove are skipped.
 */
static void sss_send_event_to_dst(struct sss_pci_adapter *adapter, u16 func_id,
				  struct sss_event_info *event_info)
{
	struct sss_pci_adapter *dest_adapter = NULL;

	sss_hold_chip_node();

	list_for_each_entry(dest_adapter, &adapter->chip_node->func_list, node) {
		/* Fix: test the candidate receiver rather than the source
		 * adapter (the original checked the loop-invariant
		 * adapter->init_state), so events are not delivered to a
		 * function that is being removed.
		 */
		if (dest_adapter->init_state == SSS_IN_REMOVE)
			continue;
		if (sss_get_func_type(dest_adapter->hwdev) == SSS_FUNC_TYPE_VF)
			continue;

		if (sss_get_global_func_id(dest_adapter->hwdev) == func_id) {
			sss_send_event_to_uld(dest_adapter, event_info);
			break;
		}
	}

	sss_put_chip_node();
}
/* Broadcast an event to every PF/PPF on the chip this adapter belongs to.
 * VFs and functions in the middle of remove are skipped.
 */
static void sss_send_event_to_all_pf(struct sss_pci_adapter *adapter,
				     struct sss_event_info *event_info)
{
	struct sss_pci_adapter *dest_adapter = NULL;

	sss_hold_chip_node();

	list_for_each_entry(dest_adapter, &adapter->chip_node->func_list, node) {
		/* Fix: test the candidate receiver rather than the source
		 * adapter (the original checked the loop-invariant
		 * adapter->init_state), so a PF being removed is skipped.
		 */
		if (dest_adapter->init_state == SSS_IN_REMOVE)
			continue;
		if (sss_get_func_type(dest_adapter->hwdev) == SSS_FUNC_TYPE_VF)
			continue;

		sss_send_event_to_uld(dest_adapter, event_info);
	}

	sss_put_chip_node();
}
/* Top-level device event dispatcher (registered via sss_register_dev_event).
 * Serious FLR-level PF faults are routed to the faulting PF only, management
 * watchdog events are broadcast to all PFs, everything else goes to this
 * function's own ULDs.
 */
static void sss_process_event(void *data, struct sss_event_info *event_info)
{
	u16 id;
	struct sss_pci_adapter *pci_adapter = data;
	struct sss_fault_event *fault_event = (void *)event_info->event_data;

	if (SSS_CHECK_EVENT_INFO(event_info) &&
	    SSS_CHECK_FAULT_EVENT_INFO(pci_adapter->hwdev, fault_event)) {
		/* Serious fault: deliver only to the PF it names. */
		id = fault_event->info.chip.func_id;
		return sss_send_event_to_dst(pci_adapter, id, event_info);
	}

	if (event_info->type == SSS_EVENT_MGMT_WATCHDOG)
		sss_send_event_to_all_pf(pci_adapter, event_info);
	else
		sss_send_event_to_uld(pci_adapter, event_info);
}
/* Push the host UTC time (in milliseconds) to the firmware and log the
 * result. NOTE(review): do_gettimeofday/rtc_time_to_tm are legacy kernel
 * APIs kept for old-kernel compatibility (see compat notes in sss_kernel.h).
 */
static void sss_sync_time_to_chip(struct sss_pci_adapter *adapter)
{
	int ret;
	u64 mstime;
	struct timeval val = {0};
	struct rtc_time r_time = {0};

	do_gettimeofday(&val);

	/* Convert seconds + microseconds to whole milliseconds. */
	mstime = (u64)(val.tv_sec * MSEC_PER_SEC + val.tv_usec / USEC_PER_MSEC);
	ret = sss_chip_sync_time(adapter->hwdev, mstime);
	if (ret != 0) {
		sdk_err(&adapter->pcidev->dev, "Fail to sync UTC time to fw, ret:%d.\n", ret);
	} else {
		rtc_time_to_tm((unsigned long)(val.tv_sec), &r_time);
		sdk_info(&adapter->pcidev->dev,
			 "Success to sync UTC time to fw. UTC time %d-%02d-%02d %02d:%02d:%02d.\n",
			 r_time.tm_year + SSS_SYNC_YEAR_OFFSET,
			 r_time.tm_mon + SSS_SYNC_MONTH_OFFSET,
			 r_time.tm_mday, r_time.tm_hour, r_time.tm_min, r_time.tm_sec);
	}
}
/* Attach one upper-layer driver to this adapter by calling its probe()
 * callback, then record the resulting uld device. Already-attached services
 * are treated as success (returns 0). Serialized by uld_attach_mutex.
 */
int sss_attach_uld_driver(struct sss_pci_adapter *adapter,
			  enum sss_service_type type, const struct sss_uld_info *uld_info)
{
	int ret = 0;
	void *uld = NULL;
	const char **name = sss_get_uld_names();
	struct pci_dev *pdev = adapter->pcidev;

	mutex_lock(&adapter->uld_attach_mutex);

	/* Already attached: nothing to do. */
	if (adapter->uld_dev[type]) {
		sdk_err(&pdev->dev, "Fail to attach pci dev, driver %s\n", name[type]);
		mutex_unlock(&adapter->uld_attach_mutex);
		return 0;
	}

	ret = uld_info->probe(&adapter->hal_dev, &uld, adapter->uld_dev_name[type]);
	if (ret != 0) {
		sdk_err(&pdev->dev, "Fail to probe for driver %s\n", name[type]);
		mutex_unlock(&adapter->uld_attach_mutex);
		return ret;
	}

	adapter->uld_dev[type] = uld;
	set_bit(type, &adapter->uld_attach_state);
	mutex_unlock(&adapter->uld_attach_mutex);

	sdk_info(&pdev->dev, "Success to attach %s driver\n", name[type]);

	return 0;
}
/* Decide whether a VF may load the given service in the host: requires the
 * VF's physical function to already have a bound adapter.
 * NOTE(review): @service_type is currently unused - the check is identical
 * for every service; presumably a per-service policy hook for later.
 */
static bool sss_get_vf_service_load(struct pci_dev *pdev,
				    enum sss_service_type service_type)
{
	struct sss_pci_adapter *adapter = NULL;
	struct pci_dev *dev = NULL;

	if (!pdev) {
		pr_err("Invalid pdev, is null.\n");
		return false;
	}

	/* For a VF, look at its physfn's drvdata. */
	dev = (pdev->is_virtfn != 0) ? pdev->physfn : pdev;

	adapter = pci_get_drvdata(dev);
	if (!adapter) {
		sdk_err(&pdev->dev, "Invalid pci adapter, is null.\n");
		return false;
	}

	return true;
}
/* Attach every registered ULD to this adapter, honoring the host-side VF
 * service load policy. Holds the chip-node and uld locks for the whole scan
 * so registration/unregistration cannot race with the attach loop.
 */
static void sss_attach_all_uld_driver(struct sss_pci_adapter *adapter)
{
	enum sss_service_type type;
	struct pci_dev *pdev = adapter->pcidev;
	struct sss_uld_info *info = sss_get_uld_info();

	sss_hold_chip_node();
	sss_lock_uld();
	for (type = SSS_SERVICE_TYPE_NIC; type < SSS_SERVICE_TYPE_MAX; type++) {
		/* Service not registered yet. */
		if (!info[type].probe)
			continue;
		/* VFs may be forbidden from loading a service in the host. */
		if (pdev->is_virtfn &&
		    !sss_get_vf_service_load(pdev, type)) {
			sdk_info(&pdev->dev,
				 "VF dev disable service_type = %d load in host\n", type);
			continue;
		}
		sss_attach_uld_driver(adapter, type, &info[type]);
	}
	sss_unlock_uld();
	sss_put_chip_node();
}
/* Populate the HAL device handles and, when ULD attach is enabled by module
 * configuration, attach all registered upper-layer drivers. Always returns 0.
 */
static int sss_attach_uld_dev(struct sss_pci_adapter *adapter)
{
	adapter->hal_dev.pdev = adapter->pcidev;
	adapter->hal_dev.hwdev = adapter->hwdev;

	if (sss_attach_is_enable())
		sss_attach_all_uld_driver(adapter);

	return 0;
}
/* Register an upper-layer driver for @type and immediately attach it to
 * every adapter on every chip already probed. Returns 0 on success,
 * -EINVAL on bad arguments or duplicate registration. Attach failures for
 * individual adapters are logged but do not fail the registration.
 */
int sss_register_uld(enum sss_service_type type, struct sss_uld_info *uld_info)
{
	struct sss_pci_adapter *adapter = NULL;
	struct sss_card_node *card_node = NULL;
	struct list_head *list = NULL;
	struct sss_uld_info *info = sss_get_uld_info();
	const char **uld_name = sss_get_uld_names();

	if (type >= SSS_SERVICE_TYPE_MAX) {
		pr_err("Unknown type %d of uld to register\n", type);
		return -EINVAL;
	}

	/* probe and remove callbacks are mandatory. */
	if (!uld_info || !uld_info->probe || !uld_info->remove) {
		pr_err("Invalid info of %s driver to register\n", uld_name[type]);
		return -EINVAL;
	}

	sss_hold_chip_node();
	sss_lock_uld();

	/* A non-NULL probe marks the slot as taken. */
	if (info[type].probe) {
		sss_unlock_uld();
		sss_put_chip_node();
		pr_err("Driver %s already register\n", uld_name[type]);
		return -EINVAL;
	}

	list = sss_get_chip_list();
	memcpy(&info[type], uld_info, sizeof(*uld_info));
	/* Attach the new ULD to every already-probed function. */
	list_for_each_entry(card_node, list, node) {
		list_for_each_entry(adapter, &card_node->func_list, node) {
			if (sss_attach_uld_driver(adapter, type, uld_info) != 0) {
				sdk_err(&adapter->pcidev->dev,
					"Fail to attach %s driver to pci dev\n", uld_name[type]);
				continue;
			}
		}
	}

	sss_unlock_uld();
	sss_put_chip_node();

	pr_info("Success to register %s driver\n", uld_name[type]);

	return 0;
}
EXPORT_SYMBOL(sss_register_uld);
/* Report this PF's bus/device/function numbers to the chip after a
 * successful probe. VFs have nothing to report and return 0 directly.
 */
static int sss_notify_ok_to_chip(struct sss_pci_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pcidev;
	int ret;

	/* Only PFs notify the chip of their BDF. */
	if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF)
		return 0;

	ret = sss_chip_set_pci_bdf_num(adapter->hwdev, pdev->bus->number,
				       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	if (ret != 0)
		sdk_err(&pdev->dev, "Fail to set BDF info to chip\n");

	return ret;
}
/* Bring up the function: init the hwdev, hook the device event callback,
 * sync time (PF only), initialize the debug tool, publish the adapter on
 * the chip's function list, then attach ULDs. Unwinds in reverse order on
 * failure.
 */
static int sss_init_function(struct pci_dev *pdev, struct sss_pci_adapter *adapter)
{
	int ret;

	ret = sss_init_hwdev(adapter);
	if (ret != 0) {
		adapter->hwdev = NULL;
		sdk_err(&pdev->dev, "Fail to init hardware device\n");
		return -EFAULT;
	}

	sss_register_dev_event(adapter->hwdev, adapter, sss_process_event);

	if (sss_get_func_type(adapter->hwdev) != SSS_FUNC_TYPE_VF) {
		/* PFs own SR-IOV and are responsible for time sync. */
		set_bit(SSS_SRIOV_PRESENT, &adapter->sriov_info.state);
		sss_sync_time_to_chip(adapter);
	}

	sss_chip_node_lock();
	ret = sss_tool_init(adapter->hwdev, adapter->chip_node);
	if (ret) {
		sss_chip_node_unlock();
		sdk_err(&pdev->dev, "Failed to initialize dbgtool\n");
		goto nictool_init_err;
	}
	sss_chip_node_unlock();

	sss_add_func_list(adapter);

	ret = sss_attach_uld_dev(adapter);
	if (ret != 0) {
		sdk_err(&pdev->dev, "Fail to attach uld dev\n");
		goto attach_uld_err;
	}

	return 0;

attach_uld_err:
	sss_del_func_list(adapter);
	sss_chip_node_lock();
	sss_tool_uninit(adapter->hwdev, adapter->chip_node);
	sss_chip_node_unlock();
nictool_init_err:
	sss_unregister_dev_event(adapter->hwdev);
	sss_deinit_hwdev(adapter->hwdev);

	return ret;
}
/* Initialize the adapter for one PCI function: check the VF load policy,
 * map BARs, join (or create) the chip node, init the function, and notify
 * the chip. Probe state transitions SSS_PROBE_START -> SSS_PROBE_OK; on any
 * failure the steps completed so far are unwound.
 */
static int sss_init_adapter(struct sss_pci_adapter *adapter)
{
	int ret;
	struct pci_dev *pdev = adapter->pcidev;

	/* Host policy may forbid loading a VF; treated as success. */
	if (pdev->is_virtfn != 0 && (!sss_get_vf_load_state(pdev))) {
		sdk_info(&pdev->dev, "Vf dev disable load in host\n");
		return 0;
	}

	sss_set_adapter_probe_state(adapter, SSS_PROBE_START);

	ret = sss_map_pci_bar(pdev, adapter);
	if (ret != 0) {
		sdk_err(&pdev->dev, "Fail to map bar\n");
		goto map_bar_fail;
	}

	/* if chip information of pcie function exist, add the function into chip */
	ret = sss_alloc_chip_node(adapter);
	if (ret != 0) {
		sdk_err(&pdev->dev, "Fail to add new chip node to global list\n");
		goto alloc_chip_node_fail;
	}

	ret = sss_init_function(pdev, adapter);
	if (ret != 0)
		goto func_init_err;

	ret = sss_notify_ok_to_chip(adapter);
	if (ret != 0) {
		sdk_err(&pdev->dev, "Fail to notify ok\n");
		goto notify_err;
	}

	sss_set_adapter_probe_state(adapter, SSS_PROBE_OK);

	return 0;

notify_err:
	sss_deinit_function(pdev);
func_init_err:
	sss_free_chip_node(adapter);
alloc_chip_node_fail:
	sss_unmap_pci_bar(adapter);
map_bar_fail:
	sdk_err(&pdev->dev, "Fail to init adapter\n");
	return ret;
}
/* One-time setup of adapter bookkeeping: back-pointer to the pci_dev,
 * initial probe state, locks, and pci drvdata registration.
 */
static void sss_init_adapter_param(struct sss_pci_adapter *adapter,
				   struct pci_dev *pdev)
{
	adapter->pcidev = pdev;
	adapter->init_state = SSS_NO_PROBE;
	spin_lock_init(&adapter->dettach_uld_lock);
	mutex_init(&adapter->uld_attach_mutex);
	pci_set_drvdata(pdev, adapter);
}
/* PCI probe entry point (struct pci_driver.probe): allocate the adapter,
 * enable the PCI device and initialize the adapter. Returns 0 on success
 * or a negative errno; everything allocated so far is released on failure.
 */
int sss_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct sss_pci_adapter *adapter = NULL;

	/* Fix: validate pdev before dereferencing it for logging - the
	 * original called sdk_info(&pdev->dev, ...) ahead of the NULL check.
	 */
	if (!pdev)
		return -EINVAL;

	sdk_info(&pdev->dev, "Pci probe begin\n");

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		goto init_pci_err;
	}

	sss_init_adapter_param(adapter, pdev);

	ret = sss_init_pci_dev(pdev);
	if (ret != 0) {
		kfree(adapter);
		sdk_err(&pdev->dev, "Fail to init pci device\n");
		goto init_pci_err;
	}

	ret = sss_init_adapter(adapter);
	if (ret != 0)
		goto init_adapter_err;

	sdk_info(&pdev->dev, "Success to probe pci\n");
	return 0;

init_adapter_err:
	sss_deinit_pci_dev(pdev);
init_pci_err:
	sdk_err(&pdev->dev, "Fail to pci probe\n");

	return ret;
}

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_PCI_PROBE_H
#define SSS_PCI_PROBE_H

#include <linux/pci.h>

#include "sss_adapter.h"

/* PCI probe entry for the sssnic driver (struct pci_driver.probe). */
int sss_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);

#endif

View File

@ -0,0 +1,263 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <net/addrconf.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/aer.h>
#include <linux/debugfs.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_pci_id_tbl.h"
#include "sss_pci_sriov.h"
#include "sss_adapter_mgmt.h"
#include "sss_hwdev_init.h"
#include "sss_hwdev_api.h"
#include "sss_hwif_mgmt_init.h"
#include "sss_pci_global.h"
#include "sss_tool.h"
/* Timeouts (ms) and polling sleep ranges (us) used by the remove path. */
#define SSS_WAIT_SRIOV_CFG_TIMEOUT 15000
#define SSS_EVENT_PROCESS_TIMEOUT 10000

#define SSS_SRIOV_MIN_USLEEP 9900
#define SSS_SRIOV_MAX_USLEEP 10000
#define SSS_EVENT_MIN_USLEEP 900
#define SSS_EVENT_MAX_USLEEP 1000
/* Transition the adapter to SSS_IN_REMOVE under the uld attach mutex.
 * If the adapter was never fully probed, warn and leave the state alone.
 */
static void sss_set_adapter_remove_state(struct sss_pci_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pcidev;

	mutex_lock(&adapter->uld_attach_mutex);
	if (adapter->init_state != SSS_PROBE_OK) {
		sdk_warn(&pdev->dev, "Current function don not need remove\n");
		mutex_unlock(&adapter->uld_attach_mutex);
		/* Fix: return here - the original fell through, writing
		 * init_state without the lock and unlocking the mutex twice.
		 */
		return;
	}
	adapter->init_state = SSS_IN_REMOVE;
	mutex_unlock(&adapter->uld_attach_mutex);
}
/* Clear the SR-IOV present flag and poll (up to SSS_WAIT_SRIOV_CFG_TIMEOUT
 * ms) until no SR-IOV enable/disable operation is in flight, so remove does
 * not race with a concurrent sriov_configure call.
 */
static void sss_wait_sriov_cfg_complete(struct sss_pci_adapter *adapter)
{
	unsigned long end_time;
	struct sss_sriov_info *info = &adapter->sriov_info;

	clear_bit(SSS_SRIOV_PRESENT, &info->state);
	usleep_range(SSS_SRIOV_MIN_USLEEP, SSS_SRIOV_MAX_USLEEP);

	end_time = jiffies + msecs_to_jiffies(SSS_WAIT_SRIOV_CFG_TIMEOUT);
	do {
		if (!test_bit(SSS_SRIOV_ENABLE, &info->state) &&
		    !test_bit(SSS_SRIOV_DISABLE, &info->state))
			return;

		usleep_range(SSS_SRIOV_MIN_USLEEP, SSS_SRIOV_MAX_USLEEP);
	} while (time_before(jiffies, end_time));
}
/* Try to claim the per-service busy bit, polling for up to
 * SSS_EVENT_PROCESS_TIMEOUT ms while an event handler is running.
 * Returns false when the bit was claimed (holder on success), true on
 * timeout (the bit is left set either way).
 */
static bool sss_wait_uld_dev_timeout(struct sss_pci_adapter *adapter,
				     enum sss_service_type type)
{
	unsigned long end_time;

	end_time = jiffies + msecs_to_jiffies(SSS_EVENT_PROCESS_TIMEOUT);
	do {
		if (!test_and_set_bit(type, &adapter->uld_run_state))
			return false;

		usleep_range(SSS_EVENT_MIN_USLEEP, SSS_EVENT_MAX_USLEEP);
	} while (time_before(jiffies, end_time));

	/* One last attempt after the deadline. */
	if (!test_and_set_bit(type, &adapter->uld_run_state))
		return false;

	return true;
}
/* Detach one ULD from the adapter: wait for any in-flight event handler,
 * clear the attach bit, invoke the ULD's remove() and drop the device
 * pointer. If the event-handler wait timed out the busy bit is deliberately
 * left set so no further events are delivered. Serialized by
 * uld_attach_mutex; no-op when the service is not attached.
 */
void sss_detach_uld_driver(struct sss_pci_adapter *adapter,
			   enum sss_service_type type)
{
	bool timeout;
	struct sss_uld_info *info = sss_get_uld_info();
	const char **name = sss_get_uld_names();

	mutex_lock(&adapter->uld_attach_mutex);
	if (!adapter->uld_dev[type]) {
		mutex_unlock(&adapter->uld_attach_mutex);
		return;
	}

	timeout = sss_wait_uld_dev_timeout(adapter, type);

	spin_lock_bh(&adapter->dettach_uld_lock);
	clear_bit(type, &adapter->uld_attach_state);
	spin_unlock_bh(&adapter->dettach_uld_lock);

	info[type].remove(&adapter->hal_dev, adapter->uld_dev[type]);
	adapter->uld_dev[type] = NULL;

	/* Only release the busy bit if we actually acquired it. */
	if (!timeout)
		clear_bit(type, &adapter->uld_run_state);

	sdk_info(&adapter->pcidev->dev,
		 "Success to detach %s driver from pci device\n", name[type]);
	mutex_unlock(&adapter->uld_attach_mutex);
}
/* Detach every registered ULD from the adapter. Non-NIC services are
 * detached first (highest type down), NIC last, since the other services
 * may depend on the NIC service.
 */
void sss_detach_all_uld_driver(struct sss_pci_adapter *adapter)
{
	struct sss_uld_info *info = sss_get_uld_info();
	enum sss_service_type type;

	sss_hold_chip_node();
	sss_lock_uld();
	for (type = SSS_SERVICE_TYPE_MAX - 1; type > SSS_SERVICE_TYPE_NIC; type--) {
		if (info[type].probe)
			sss_detach_uld_driver(adapter, type);
	}

	/* NIC goes last. */
	if (info[SSS_SERVICE_TYPE_NIC].probe)
		sss_detach_uld_driver(adapter, SSS_SERVICE_TYPE_NIC);
	sss_unlock_uld();
	sss_put_chip_node();
}
/* Thin wrapper kept for the public remove API (note: historical spelling
 * "dettach" preserved because it is part of the exported header).
 */
void sss_dettach_uld_dev(struct sss_pci_adapter *adapter)
{
	sss_detach_all_uld_driver(adapter);
}
/* Unregister an upper-layer driver: detach it from every function on every
 * chip (VFs first, then PFs, then PPFs - the reverse of dependency order),
 * then clear its slot in the global uld info table.
 */
void sss_unregister_uld(enum sss_service_type type)
{
	struct sss_pci_adapter *adapter = NULL;
	struct sss_card_node *card_node = NULL;
	struct list_head *card_list = NULL;
	struct sss_uld_info *info = sss_get_uld_info();

	if (type >= SSS_SERVICE_TYPE_MAX) {
		pr_err("Unknown type %d of uld to unregister\n", type);
		return;
	}

	sss_hold_chip_node();
	sss_lock_uld();
	card_list = sss_get_chip_list();
	list_for_each_entry(card_node, card_list, node) {
		/* detach vf first */
		list_for_each_entry(adapter, &card_node->func_list, node)
			if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF)
				sss_detach_uld_driver(adapter, type);

		list_for_each_entry(adapter, &card_node->func_list, node)
			if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_PF)
				sss_detach_uld_driver(adapter, type);

		list_for_each_entry(adapter, &card_node->func_list, node)
			if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_PPF)
				sss_detach_uld_driver(adapter, type);
	}

	/* Free the registration slot (probe == NULL means unregistered). */
	memset(&info[type], 0, sizeof(*info));
	sss_unlock_uld();
	sss_put_chip_node();
}
EXPORT_SYMBOL(sss_unregister_uld);
/* Tear down the function in the reverse order of sss_init_function:
 * quiesce the management channel, drop the adapter from the chip's
 * function list, uninit the debug tool, detach ULDs, unhook the event
 * callback and finally deinit the hwdev.
 */
void sss_deinit_function(struct pci_dev *pdev)
{
	struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev);

	/* Stop new management traffic and drain pending work first. */
	sss_chip_disable_mgmt_channel(adapter->hwdev);

	sss_flush_mgmt_workq(adapter->hwdev);

	sss_del_func_list(adapter);

	sss_chip_node_lock();
	sss_tool_uninit(adapter->hwdev, adapter->chip_node);
	sss_chip_node_unlock();

	sss_dettach_uld_dev(adapter);

	sss_unregister_dev_event(adapter->hwdev);

	sss_deinit_hwdev(adapter->hwdev);
}
/* Unmap the BARs mapped by sss_map_pci_bar (mgmt BAR only exists on PFs). */
void sss_unmap_pci_bar(struct sss_pci_adapter *adapter)
{
	iounmap(adapter->cfg_reg_bar);
	iounmap(adapter->intr_reg_bar);

	if (!SSS_IS_VF_DEV(adapter->pcidev))
		iounmap(adapter->mgmt_reg_bar);

	iounmap(adapter->db_reg_bar);
}
/* Undo sss_init_adapter: mark the adapter as removing, detach the hwdev,
 * disable SR-IOV on PFs (after waiting for any in-flight configure),
 * deinit the function, leave the chip node and unmap BARs. Always returns 0.
 */
int sss_deinit_adapter(struct sss_pci_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pcidev;

	sss_set_adapter_remove_state(adapter);

	sss_hwdev_detach(adapter->hwdev);

	if (sss_get_func_type(adapter->hwdev) != SSS_FUNC_TYPE_VF) {
		/* PF: make sure no sriov_configure is racing with remove. */
		sss_wait_sriov_cfg_complete(adapter);
		sss_pci_disable_sriov(adapter);
	}

	sss_deinit_function(pdev);

	sss_free_chip_node(adapter);

	sss_unmap_pci_bar(adapter);

	sss_set_adapter_probe_state(adapter, SSS_NO_PROBE);

	sdk_info(&pdev->dev, "Pcie device removed function\n");

	return 0;
}
/* Release the PCI resources taken by sss_init_pci_dev and free the adapter
 * structure (drvdata is cleared first so stale lookups fail cleanly).
 */
void sss_deinit_pci_dev(struct pci_dev *pdev)
{
	struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(adapter);
}
/* PCI remove entry point (struct pci_driver.remove). A NULL adapter means
 * probe never completed for this function (e.g. VF load disabled), so
 * there is nothing to tear down.
 */
void sss_pci_remove(struct pci_dev *pdev)
{
	struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev);

	if (!adapter)
		return;

	sdk_info(&pdev->dev, "Begin pcie device remove\n");

	sss_deinit_adapter(adapter);

	sss_deinit_pci_dev(pdev);

	sdk_info(&pdev->dev, "Success to remove pcie device\n");
}

View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_PCI_REMOVE_H
#define SSS_PCI_REMOVE_H

#include <linux/pci.h>

#include "sss_hw_svc_cap.h"
#include "sss_adapter.h"

/* Detach a single ULD / all ULDs from an adapter. */
void sss_detach_uld_driver(struct sss_pci_adapter *adapter, enum sss_service_type type);
void sss_detach_all_uld_driver(struct sss_pci_adapter *adapter);
/* Historical spelling kept: wrapper around sss_detach_all_uld_driver(). */
void sss_dettach_uld_dev(struct sss_pci_adapter *adapter);
/* Teardown helpers used by the remove path (reverse of the probe path). */
void sss_deinit_function(struct pci_dev *pdev);
void sss_unmap_pci_bar(struct sss_pci_adapter *adapter);
int sss_deinit_adapter(struct sss_pci_adapter *adapter);
void sss_deinit_pci_dev(struct pci_dev *pdev);
/* struct pci_driver.remove entry point. */
void sss_pci_remove(struct pci_dev *pdev);

#endif

View File

@ -0,0 +1,41 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <net/addrconf.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/aer.h>
#include <linux/debugfs.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_pci_id_tbl.h"
#include "sss_pci_sriov.h"
#include "sss_adapter_mgmt.h"
#include "sss_hwdev_api.h"
#include "sss_hwdev_init.h"
/* PCI shutdown entry point (struct pci_driver.shutdown): quiesce the hwdev
 * and disable the PCI device for reboot/kexec.
 * NOTE(review): sss_hwdev_stop() runs after pci_disable_device(); looks
 * intentional (stop after DMA is disabled) but confirm against the
 * hardware's shutdown sequence.
 */
void sss_pci_shutdown(struct pci_dev *pdev)
{
	struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev);

	sdk_info(&pdev->dev, "Shutdown device\n");

	if (adapter)
		sss_hwdev_shutdown(adapter->hwdev);

	pci_disable_device(pdev);

	if (adapter)
		sss_hwdev_stop(adapter->hwdev);
}

View File

@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_PCI_SHUTDOWN_H
#define SSS_PCI_SHUTDOWN_H

#include <linux/pci.h>

/* struct pci_driver.shutdown entry point. */
void sss_pci_shutdown(struct pci_dev *pdev);

#endif

View File

@ -0,0 +1,190 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_adapter_mgmt.h"
#include "sss_hwif_mbx.h"
#include "sss_hwif_mbx_init.h"
#include "sss_pci_sriov.h"
#include "sss_hwdev_api.h"
#include "sss_hwif_api.h"
/* Prepare hardware state for @vf_num VFs before pci_enable_sriov():
 * allocate mailbox message channels and set each VF's work-queue page size.
 */
static int sss_init_vf_hw(void *hwdev, u16 vf_num)
{
	int ret;
	u16 i;
	u16 id;

	/* mbx msg channel resources will be freed during remove process */
	ret = sss_init_func_mbx_msg(hwdev, sss_get_max_vf_num(hwdev));
	if (ret != 0)
		return ret;

	/* vf use 256K as default wq page size, and can't change it */
	for (i = 1; i <= vf_num; i++) {
		/* Global function id of the i-th VF of this PF. */
		id = sss_get_glb_pf_vf_offset(hwdev) + i;
		ret = sss_chip_set_wq_page_size(hwdev, id, SSS_DEFAULT_WQ_PAGE_SIZE);
		if (ret != 0)
			return ret;
	}

	return 0;
}
/* Undo sss_init_vf_hw: restore the hardware default work-queue page size
 * for each of the @vf_num VFs of this PF.
 */
static void sss_deinit_vf_hw(void *hwdev, u16 vf_num)
{
	u16 vf_id;

	for (vf_id = 1; vf_id <= vf_num; vf_id++) {
		u16 glb_func_id = sss_get_glb_pf_vf_offset(hwdev) + vf_id;

		sss_chip_set_wq_page_size(hwdev, glb_func_id, SSS_HW_WQ_PAGE_SIZE);
	}
}
#ifdef CONFIG_PCI_IOV
/* Broadcast an SR-IOV state-change event to interested listeners.
 * vf_num == 0 signals "disabled" (event_data stays zeroed); a non-zero
 * vf_num signals "enabled" with the VF count.
 */
static void sss_notify_sriov_state_change(void *hwdev, u16 vf_num)
{
	struct sss_event_info event = {0};

	event.service = SSS_EVENT_SRV_COMM;
	event.type = SSS_EVENT_SRIOV_STATE_CHANGE;

	if (vf_num > 0) {
		((struct sss_sriov_state_info *)(void *)event.event_data)->enable = 1;
		((struct sss_sriov_state_info *)(void *)event.event_data)->vf_num = vf_num;
	}

	sss_do_event_callback(hwdev, &event);
}
#endif
/* Disable SR-IOV on this PF: refuse while VFs are assigned to guests,
 * notify listeners, call pci_disable_sriov() and restore the VF hardware
 * defaults. The SSS_SRIOV_DISABLE bit guards against concurrent disables.
 * Without CONFIG_PCI_IOV this is a no-op returning 0.
 */
int sss_pci_disable_sriov(struct sss_pci_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	void *hwdev = adapter->hwdev;
	struct pci_dev *pdev = adapter->pcidev;
	struct sss_sriov_info *info = &adapter->sriov_info;

	if (!info->enabled)
		return 0;

	if (test_and_set_bit(SSS_SRIOV_DISABLE, &info->state)) {
		sdk_err(&pdev->dev, "SR-IOV disable in process.");
		return -EPERM;
	}

	/* Cannot tear down VFs that a VM still owns. */
	if (pci_vfs_assigned(pdev) != 0) {
		clear_bit(SSS_SRIOV_DISABLE, &info->state);
		sdk_warn(&pdev->dev, "VFs are assigned - VFs will not be deallocated\n");
		return -EPERM;
	}

	sss_notify_sriov_state_change(hwdev, 0);

	info->enabled = false;

	pci_disable_sriov(pdev);

	sss_deinit_vf_hw(hwdev, (u16)info->vf_num);

	info->vf_num = 0;

	clear_bit(SSS_SRIOV_DISABLE, &info->state);

#endif

	return 0;
}
#ifdef CONFIG_PCI_IOV
/* Reconcile an enable request with VFs that may already exist.
 * Returns 0 when enable should proceed, the (positive) vf_num when exactly
 * that many VFs are already enabled (caller returns it as success), or a
 * negative errno when disabling the mismatched VFs failed. In the non-zero
 * return cases the SSS_SRIOV_ENABLE bit is released here.
 */
static int sss_check_existing_vf(struct sss_pci_adapter *adapter, u16 vf_num)
{
	int ret;
	struct pci_dev *pdev = adapter->pcidev;
	int existing_vf = pci_num_vf(pdev);
	struct sss_sriov_info *info = &adapter->sriov_info;

	if (existing_vf != 0 && existing_vf != vf_num) {
		/* Different count requested: tear down the old VFs first. */
		ret = sss_pci_disable_sriov(adapter);
		if (ret != 0) {
			clear_bit(SSS_SRIOV_ENABLE, &info->state);
			return ret;
		}
	} else if (existing_vf == vf_num) {
		/* Requested count already active: nothing to do. */
		clear_bit(SSS_SRIOV_ENABLE, &info->state);
		return vf_num;
	}

	return 0;
}
#endif
/* Enable @vf_num VFs on this PF: validate the request, prepare VF hardware
 * state, call pci_enable_sriov() and notify listeners. Returns vf_num on
 * success (sriov_configure convention) or a negative errno. The
 * SSS_SRIOV_ENABLE bit guards against concurrent enables. Without
 * CONFIG_PCI_IOV this is a no-op returning 0.
 */
static int sss_pci_enable_sriov(struct sss_pci_adapter *adapter, u16 vf_num)
{
#ifdef CONFIG_PCI_IOV
	int ret = 0;
	void *hwdev = adapter->hwdev;
	struct pci_dev *pdev = adapter->pcidev;
	struct sss_sriov_info *info = &adapter->sriov_info;

	if (test_and_set_bit(SSS_SRIOV_ENABLE, &info->state)) {
		sdk_err(&pdev->dev, "SR-IOV disable, vf_num %d\n", vf_num);
		return -EPERM;
	}

	if (vf_num > pci_sriov_get_totalvfs(pdev)) {
		clear_bit(SSS_SRIOV_ENABLE, &info->state);
		return -ERANGE;
	}

	/* Non-zero means "already satisfied" or error; the helper has
	 * already released the ENABLE bit in both cases.
	 */
	ret = sss_check_existing_vf(adapter, vf_num);
	if (ret != 0)
		return ret;

	ret = sss_init_vf_hw(hwdev, vf_num);
	if (ret != 0) {
		sdk_err(&pdev->dev, "Fail to init vf in hw, ret: %d\n", ret);
		clear_bit(SSS_SRIOV_ENABLE, &info->state);
		return ret;
	}

	ret = pci_enable_sriov(pdev, vf_num);
	if (ret != 0) {
		sdk_err(&pdev->dev, "Fail to enable SR-IOV, ret: %d\n", ret);
		clear_bit(SSS_SRIOV_ENABLE, &info->state);
		return ret;
	}

	info->enabled = true;
	info->vf_num = vf_num;

	sss_notify_sriov_state_change(hwdev, vf_num);

	clear_bit(SSS_SRIOV_ENABLE, &info->state);

	return vf_num;
#else

	return 0;
#endif
}
/* struct pci_driver.sriov_configure entry point: vf_num == 0 disables
 * SR-IOV, any other count enables that many VFs. Rejected with -EFAULT
 * when the adapter is missing or SR-IOV is not present on this function.
 */
int sss_pci_configure_sriov(struct pci_dev *pdev, int vf_num)
{
	struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev);

	if (!adapter)
		return -EFAULT;

	if (!test_bit(SSS_SRIOV_PRESENT, &adapter->sriov_info.state))
		return -EFAULT;

	if (vf_num == 0)
		return sss_pci_disable_sriov(adapter);

	return sss_pci_enable_sriov(adapter, (u16)vf_num);
}

View File

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_PCI_SRIOV_H
#define SSS_PCI_SRIOV_H

#include <linux/types.h>
#include <linux/pci.h>

#include "sss_sriov_info.h"
#include "sss_adapter.h"

/* Disable SR-IOV on a PF adapter (no-op without CONFIG_PCI_IOV). */
int sss_pci_disable_sriov(struct sss_pci_adapter *adapter);

/* struct pci_driver.sriov_configure entry point. */
int sss_pci_configure_sriov(struct pci_dev *pdev, int num_vfs);

#endif

View File

@ -0,0 +1,160 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "sss_kernel.h"
#include "sss_common.h"
#include "sss_hwdev.h"
#include "sss_hw_wq.h"
/* Work-queue depth limits (entries, must be power of two) and the maximum
 * number of page addresses one CLA block page can index (u64 entries).
 */
#define SSS_WQ_MIN_DEPTH 64
#define SSS_WQ_MAX_DEPTH 65536
#define SSS_WQ_MAX_PAGE_NUM (PAGE_SIZE / sizeof(u64))
/* Build the CLA indirection block. For a single-page (0-level CLA) queue the
 * block simply aliases page 0; otherwise a DMA-coherent page is allocated
 * and filled with the big-endian physical addresses of every queue page.
 */
static int sss_init_wq_block(struct sss_wq *wq)
{
	int i;

	if (SSS_WQ_IS_0_LEVEL_CLA(wq)) {
		wq->block_paddr = wq->page[0].align_paddr;
		wq->block_vaddr = wq->page[0].align_vaddr;

		return 0;
	}

	/* One PAGE_SIZE block can only index SSS_WQ_MAX_PAGE_NUM pages. */
	if (wq->page_num > SSS_WQ_MAX_PAGE_NUM) {
		sdk_err(wq->dev_hdl, "Wq page num: 0x%x out of range: %lu\n",
			wq->page_num, SSS_WQ_MAX_PAGE_NUM);
		return -EFAULT;
	}

	wq->block_vaddr = dma_zalloc_coherent(wq->dev_hdl, PAGE_SIZE,
					      &wq->block_paddr, GFP_KERNEL);
	if (!wq->block_vaddr) {
		sdk_err(wq->dev_hdl, "Fail to alloc wq block vaddr\n");
		return -ENOMEM;
	}

	/* Hardware reads the page table in big-endian. */
	for (i = 0; i < wq->page_num; i++)
		wq->block_vaddr[i] = cpu_to_be64(wq->page[i].align_paddr);

	return 0;
}
/* Free the CLA block page; a 0-level CLA queue never allocated one. */
static void sss_deinit_wq_block(struct sss_wq *wq)
{
	if (!SSS_WQ_IS_0_LEVEL_CLA(wq))
		dma_free_coherent(wq->dev_hdl, PAGE_SIZE, wq->block_vaddr,
				  wq->block_paddr);
}
/* Allocate the page descriptor array and every aligned DMA page for the
 * queue, then build the CLA block. On failure all pages allocated so far
 * (indexes < id) are freed and the array released.
 */
static int sss_alloc_wq_page(struct sss_wq *wq)
{
	int i;
	int ret;
	int id;

	wq->page = kcalloc(wq->page_num, sizeof(*wq->page), GFP_KERNEL);
	if (!wq->page)
		return -ENOMEM;

	for (id = 0; id < wq->page_num; id++) {
		ret = sss_dma_zalloc_coherent_align(wq->dev_hdl, wq->page_size,
						    wq->page_size, GFP_KERNEL, &wq->page[id]);
		if (ret != 0) {
			sdk_err(wq->dev_hdl, "Fail to alloc wq dma page\n");
			goto dma_page_err;
		}
	}

	ret = sss_init_wq_block(wq);
	if (ret != 0)
		goto block_err;

	return 0;

block_err:
dma_page_err:
	/* id pages were successfully allocated before the failure. */
	for (i = 0; i < id; i++)
		sss_dma_free_coherent_align(wq->dev_hdl, &wq->page[i]);

	kfree(wq->page);
	wq->page = NULL;

	return -ENOMEM;
}
/* Release everything sss_alloc_wq_page built: the CLA block first, then
 * every DMA page, then the page descriptor array itself.
 */
static void sss_free_wq_page(struct sss_wq *wq)
{
	int id;

	sss_deinit_wq_block(wq);

	for (id = 0; id < wq->page_num; id++)
		sss_dma_free_coherent_align(wq->dev_hdl, &wq->page[id]);

	kfree(wq->page);
	wq->page = NULL;
}
/* Fill in all derived queue geometry from the requested depth and element
 * size: masks and shifts assume both values are powers of two (validated by
 * the caller), and the page size is the hwdev's wq page size rounded up to
 * a whole PAGE_SIZE.
 */
static void sss_init_wq_param(struct sss_hwdev *hwdev, struct sss_wq *wq,
			      u32 q_depth, u16 block_size)
{
	u32 wq_page_size = ALIGN(hwdev->wq_page_size, PAGE_SIZE);

	/* Producer/consumer indexes start at zero. */
	wq->pi = 0;
	wq->ci = 0;

	wq->dev_hdl = hwdev->dev_hdl;
	wq->q_depth = q_depth;
	wq->id_mask = (u16)(q_depth - 1);

	wq->elem_size = block_size;
	wq->elem_size_shift = (u16)ilog2(block_size);

	wq->page_size = wq_page_size;
	wq->elem_per_page = min(wq_page_size / block_size, q_depth);
	wq->elem_per_page_shift = (u16)ilog2(wq->elem_per_page);
	wq->elem_per_page_mask = (u16)(wq->elem_per_page - 1);

	/* Pages needed to hold q_depth elements, rounded up. */
	wq->page_num =
		(u16)(ALIGN(((u32)q_depth * block_size), wq_page_size) / wq_page_size);
}
/* Create a hardware work queue.
 * @q_depth must be a power of two within [SSS_WQ_MIN_DEPTH, SSS_WQ_MAX_DEPTH]
 * and @block_size a non-zero power of two. Returns 0 on success, -EINVAL on
 * bad arguments, or the allocation error from sss_alloc_wq_page().
 */
int sss_create_wq(void *hwdev, struct sss_wq *wq, u32 q_depth, u16 block_size)
{
	if (!wq || !hwdev) {
		pr_err("Invalid wq or dev_hdl\n");
		return -EINVAL;
	}

	if ((q_depth & (q_depth - 1)) != 0 ||
	    q_depth < SSS_WQ_MIN_DEPTH || q_depth > SSS_WQ_MAX_DEPTH) {
		sdk_err(SSS_TO_DEV(hwdev), "Invalid q_depth(%u)\n", q_depth);
		return -EINVAL;
	}

	if (block_size == 0 || (block_size & (block_size - 1)) != 0) {
		sdk_err(SSS_TO_DEV(hwdev), "Invalid block_size(%u)\n", block_size);
		return -EINVAL;
	}

	sss_init_wq_param(hwdev, wq, q_depth, block_size);

	return sss_alloc_wq_page(wq);
}
EXPORT_SYMBOL(sss_create_wq);
/* Destroy a work queue created by sss_create_wq(); NULL is tolerated. */
void sss_destroy_wq(struct sss_wq *wq)
{
	if (wq)
		sss_free_wq_page(wq);
}
EXPORT_SYMBOL(sss_destroy_wq);

View File

@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSSNIC_NICTOOL_H
#define SSSNIC_NICTOOL_H

#include "sss_tool_chip.h"
#include "sss_tool_sdk.h"
#include "sss_tool_sm.h"
#include "sss_tool_comm.h"

/* Smaller mapping under unit test (_LLT_TEST_) to keep allocations cheap. */
#ifndef _LLT_TEST_
#define SSS_TOOL_PAGE_ORDER (10)
#else
#define SSS_TOOL_PAGE_ORDER (1)
#endif

/* Size of the per-card shared memory mapping exposed to the user tool. */
#define SSS_TOOL_MEM_MAP_SIZE (PAGE_SIZE * (1 << SSS_TOOL_PAGE_ORDER))

#define SSS_TOOL_CARD_MAX (64)

/* Per-function debug-tool init/uninit, called from the probe/remove path. */
int sss_tool_init(void *hwdev, void *chip_node);
void sss_tool_uninit(void *hwdev, void *chip_node);

/* Per-card DMA mapping table shared with the tool core. */
extern u64 g_card_pa[SSS_TOOL_CARD_MAX];
extern void *g_card_va[SSS_TOOL_CARD_MAX];
extern int g_card_id;

#endif

View File

@ -0,0 +1,802 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/semaphore.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include "sss_kernel.h"
#include "sss_hwdev.h"
#include "sss_common.h"
#include "sss_pci_sriov.h"
#include "sss_adapter_mgmt.h"
#include "sss_hwif_adm.h"
#include "sss_hwif_adm_common.h"
#include "sss_hwif_mgmt_common.h"
#include "sss_hwif_ctrlq.h"
#include "sss_hwif_api.h"
#include "sss_hw_common.h"
#include "sss_mgmt_channel.h"
#include "sss_linux_kernel.h"
#include "sss_csr.h"
#include "sss_hw.h"
#include "sss_adapter.h"
#include "sss_tool.h"
#define SSS_TOOL_DW_WIDTH 4

/* completion timeout interval, unit is millisecond */
#define SSS_TOOL_UPDATE_MSG_TIMEOUT 50000U

#define SSS_TOOL_CLP_REG_GAP 0x20
#define SSS_TOOL_CLP_INPUT_BUF_LEN 4096UL
#define SSS_TOOL_CLP_DATA_UNIT 4UL

#define SSS_TOOL_CLP_MAX_DATA_SIZE (SSS_TOOL_CLP_INPUT_BUF_LEN / SSS_TOOL_CLP_DATA_UNIT)

/* Bit-field offsets within the CLP control registers. */
#define SSS_TOOL_CLP_REQ_SIZE_OFFSET 0
#define SSS_TOOL_CLP_RSP_SIZE_OFFSET 16
#define SSS_TOOL_CLP_BASE_OFFSET 0
#define SSS_TOOL_CLP_LEN_OFFSET 0
#define SSS_TOOL_CLP_START_OFFSET 31
#define SSS_TOOL_CLP_READY_OFFSET 31
#define SSS_TOOL_CLP_OFFSET(member) (SSS_TOOL_CLP_##member##_OFFSET)

/* Bit-field masks matching the offsets above. */
#define SSS_TOOL_CLP_SIZE_MASK 0x7ffUL
#define SSS_TOOL_CLP_BASE_MASK 0x7ffffffUL
#define SSS_TOOL_CLP_LEN_MASK 0x7ffUL
#define SSS_TOOL_CLP_START_MASK 0x1UL
#define SSS_TOOL_CLP_READY_MASK 0x1UL
#define SSS_TOOL_CLP_MASK(member) (SSS_TOOL_CLP_##member##_MASK)

#define SSS_TOOL_CLP_DELAY_CNT_MAX 200UL
#define SSS_TOOL_CLP_SRAM_SIZE_REG_MAX 0x3ff
#define SSS_TOOL_CLP_SRAM_BASE_REG_MAX 0x7ffffff
#define SSS_TOOL_CLP_LEN_REG_MAX 0x3ff
#define SSS_TOOL_CLP_START_OR_READY_REG_MAX 0x1

/* Payload size in CLP data units, header included, rounded up to a unit. */
#define SSS_TOOL_CLP_DATA_REAL_SIZE(in_size, header) \
	(((in_size) + (u16)sizeof(header) + \
	(((in_size) % SSS_TOOL_CLP_DATA_UNIT) ? SSS_TOOL_CLP_DATA_UNIT : 0)) / \
	SSS_TOOL_CLP_DATA_UNIT)

/* Extract field @offset/@mask from a raw CLP register value. */
#define SSS_TOOL_CLP_REG_VALUE(value, offset, mask) \
	(((value) >> SSS_TOOL_CLP_OFFSET(offset)) & SSS_TOOL_CLP_MASK(mask))
/* Direction of a CLP transfer: host request vs. firmware response. */
enum sss_tool_clp_data_type {
	SSS_TOOL_CLP_REQ = 0,
	SSS_TOOL_CLP_RSP = 1
};

/* Which register of a CLP channel is being addressed. */
enum sss_tool_clp_reg_type {
	SSS_TOOL_CLP_BASE = 0,
	SSS_TOOL_CLP_SIZE = 1,
	SSS_TOOL_CLP_LEN = 2,
	SSS_TOOL_CLP_START_REQ = 3,
	SSS_TOOL_CLP_READY_RSP = 4
};

/* ADM CSR access opcodes placed in sss_tool_csr_request_adm_data.dw1. */
enum SSS_TOOL_ADM_CSR_DATA_OPERATION {
	SSS_TOOL_ADM_CSR_WRITE = 0x1E,
	SSS_TOOL_ADM_CSR_READ = 0x1F
};

/* Whether the requestor expects response data back. */
enum SSS_TOOL_ADM_CSR_NEED_RESP_DATA {
	SSS_TOOL_ADM_CSR_NO_RESP_DATA = 0,
	SSS_TOOL_ADM_CSR_NEED_RESP_DATA = 1
};

/* Access width selector for an ADM CSR operation. */
enum SSS_TOOL_ADM_CSR_DATA_SIZE {
	SSS_TOOL_ADM_CSR_DATA_SZ_32 = 0,
	SSS_TOOL_ADM_CSR_DATA_SZ_64 = 1
};
/* On-the-wire descriptor for an ADM CSR read/write request.
 * dw1 and dw2 are converted to big-endian before sending.
 */
struct sss_tool_csr_request_adm_data {
	u32 dw0;
	union {
		struct {
			u32 reserved1:13;
			/* this field indicates the write/read data size:
			 * 2'b00: 32 bits
			 * 2'b01: 64 bits
			 * 2'b10~2'b11:reserved
			 */
			u32 data_size:2;
			/* this field indicates that requestor expect receive a
			 * response data or not.
			 * 1'b0: expect not to receive a response data.
			 * 1'b1: expect to receive a response data.
			 */
			u32 need_response:1;
			/* this field indicates the operation that the requestor
			 * expected.
			 * 5'b1_1110: write value to csr space.
			 * 5'b1_1111: read register from csr space.
			 */
			u32 operation_id:5;
			u32 reserved2:6;
			/* this field specifies the Src node ID for this API
			 * request message.
			 */
			u32 src_node_id:5;
		} bits;
		u32 val32;
	} dw1;
	union {
		struct {
			/* it specifies the CSR address. */
			u32 csr_addr:26;
			u32 reserved3:6;
		} bits;
		u32 val32;
	} dw2;
	/* if data_size=2'b01, it is high 32 bits of write data. else, it is
	 * 32'hFFFF_FFFF.
	 */
	u32 csr_write_data_h;
	/* the low 32 bits of write data. */
	u32 csr_write_data_l;
};

/* User-space layout of a CSR read request: rd_len bytes starting at addr. */
struct sss_tool_csr_read {
	u32 rd_len;
	u32 addr;
};

/* User-space layout of a CSR write request; data points at user memory. */
struct sss_tool_csr_write {
	u32 rd_len;
	u32 addr;
	u8 *data;
};
/* Pick the completion timeout for a management command.
 * Firmware update/activate commands need a much longer window;
 * everything else returns 0, meaning "use the default mbox/adm timeout".
 */
static u32 sss_tool_get_timeout_val(enum sss_mod_type mod, u16 cmd)
{
	if (mod != SSS_MOD_TYPE_COMM)
		return 0; /* use default mbox/adm timeout time */

	switch (cmd) {
	case SSS_COMM_MGMT_CMD_UPDATE_FW:
	case SSS_COMM_MGMT_CMD_UPDATE_BIOS:
	case SSS_COMM_MGMT_CMD_ACTIVE_FW:
	case SSS_COMM_MGMT_CMD_SWITCH_CFG:
	case SSS_COMM_MGMT_CMD_HOT_ACTIVE_FW:
		return SSS_TOOL_UPDATE_MSG_TIMEOUT;
	default:
		return 0; /* use default mbox/adm timeout time */
	}
}
/* Map a (direction, register type) pair to the CLP register address.
 * Returns -EINVAL and *addr == 0 for an unknown register type.
 */
static int sss_tool_get_clp_reg(void *hwdev, enum sss_tool_clp_data_type data_type,
				enum sss_tool_clp_reg_type type, u32 *addr)
{
	u32 reg = 0;

	if (type == SSS_TOOL_CLP_BASE)
		reg = (data_type == SSS_TOOL_CLP_REQ) ?
		      SSS_CLP_REG(REQBASE) : SSS_CLP_REG(RSPBASE);
	else if (type == SSS_TOOL_CLP_SIZE)
		reg = SSS_CLP_REG(SIZE);
	else if (type == SSS_TOOL_CLP_LEN)
		reg = (data_type == SSS_TOOL_CLP_REQ) ?
		      SSS_CLP_REG(REQ) : SSS_CLP_REG(RSP);
	else if (type == SSS_TOOL_CLP_START_REQ)
		reg = SSS_CLP_REG(REQ);
	else if (type == SSS_TOOL_CLP_READY_RSP)
		reg = SSS_CLP_REG(RSP);

	*addr = reg;

	return (reg == 0) ? -EINVAL : 0;
}
static inline int sss_tool_clp_param_valid(enum sss_tool_clp_data_type data_type,
enum sss_tool_clp_reg_type reg_type)
{
if (data_type == SSS_TOOL_CLP_REQ && reg_type == SSS_TOOL_CLP_READY_RSP)
return -EINVAL;
if (data_type == SSS_TOOL_CLP_RSP && reg_type == SSS_TOOL_CLP_START_REQ)
return -EINVAL;
return 0;
}
/* Read the CLP register at @reg_addr and extract the field selected by
 * @reg_type (SIZE uses different bit positions for request vs. response).
 * Unknown register types return the raw register value unchanged.
 */
static u32 sss_tool_get_clp_reg_value(struct sss_hwdev *hwdev,
				      enum sss_tool_clp_data_type data_type,
				      enum sss_tool_clp_reg_type reg_type, u32 reg_addr)
{
	u32 raw = sss_chip_read_reg(hwdev->hwif, reg_addr);

	if (reg_type == SSS_TOOL_CLP_BASE)
		return SSS_TOOL_CLP_REG_VALUE(raw, BASE, BASE);

	if (reg_type == SSS_TOOL_CLP_SIZE)
		return (data_type == SSS_TOOL_CLP_REQ) ?
		       SSS_TOOL_CLP_REG_VALUE(raw, REQ_SIZE, SIZE) :
		       SSS_TOOL_CLP_REG_VALUE(raw, RSP_SIZE, SIZE);

	if (reg_type == SSS_TOOL_CLP_LEN)
		return SSS_TOOL_CLP_REG_VALUE(raw, LEN, LEN);

	if (reg_type == SSS_TOOL_CLP_START_REQ)
		return SSS_TOOL_CLP_REG_VALUE(raw, START, START);

	if (reg_type == SSS_TOOL_CLP_READY_RSP)
		return SSS_TOOL_CLP_REG_VALUE(raw, READY, READY);

	return raw;
}
/* Validate the direction/register pair, resolve the register address and
 * read the requested field into *read_value.
 */
static int sss_tool_read_clp_reg(struct sss_hwdev *hwdev,
				 enum sss_tool_clp_data_type data_type,
				 enum sss_tool_clp_reg_type reg_type, u32 *read_value)
{
	u32 addr = 0;
	int err;

	err = sss_tool_clp_param_valid(data_type, reg_type);
	if (err != 0)
		return err;

	err = sss_tool_get_clp_reg(hwdev, data_type, reg_type, &addr);
	if (err != 0)
		return err;

	*read_value = sss_tool_get_clp_reg_value(hwdev, data_type, reg_type, addr);

	return 0;
}
/* Range-check @value against the hardware limit for the given CLP register.
 * Register types without a limit are always accepted.
 */
static int sss_tool_check_reg_value(enum sss_tool_clp_reg_type reg_type, u32 value)
{
	u32 limit;

	switch (reg_type) {
	case SSS_TOOL_CLP_BASE:
		limit = SSS_TOOL_CLP_SRAM_BASE_REG_MAX;
		break;
	case SSS_TOOL_CLP_SIZE:
		limit = SSS_TOOL_CLP_SRAM_SIZE_REG_MAX;
		break;
	case SSS_TOOL_CLP_LEN:
		limit = SSS_TOOL_CLP_LEN_REG_MAX;
		break;
	case SSS_TOOL_CLP_START_REQ:
	case SSS_TOOL_CLP_READY_RSP:
		limit = SSS_TOOL_CLP_START_OR_READY_REG_MAX;
		break;
	default:
		return 0; /* no range restriction for other register types */
	}

	return (value > limit) ? -EINVAL : 0;
}
/* Verify the CLP channel is initialized: request/response base addresses
 * and sizes must all read back non-zero.
 */
static int sss_tool_check_clp_init_status(struct sss_hwdev *hwdev)
{
	u32 value = 0;
	int err;

	err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, SSS_TOOL_CLP_BASE, &value);
	if (err != 0 || value == 0) {
		tool_err("Fail to read clp reg: 0x%x\n", value);
		return -EINVAL;
	}

	err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, SSS_TOOL_CLP_BASE, &value);
	if (err != 0 || value == 0) {
		tool_err("Fail to read rsp ba value: 0x%x\n", value);
		return -EINVAL;
	}

	err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, SSS_TOOL_CLP_SIZE, &value);
	if (err != 0 || value == 0) {
		tool_err("Fail to read req size\n");
		return -EINVAL;
	}

	err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, SSS_TOOL_CLP_SIZE, &value);
	if (err != 0 || value == 0) {
		tool_err("Fail to read rsp size\n");
		return -EINVAL;
	}

	return 0;
}
/* Read-modify-write a single field (LEN, START or READY) of a CLP register.
 * Invalid parameter combinations or out-of-range values are silently
 * ignored, matching the register's write-only-known-fields policy.
 */
static void sss_tool_write_clp_reg(struct sss_hwdev *hwdev,
				   enum sss_tool_clp_data_type data_type,
				   enum sss_tool_clp_reg_type reg_type, u32 value)
{
	u32 addr = 0;
	u32 mask;
	u32 shift;
	u32 cur;

	if (sss_tool_clp_param_valid(data_type, reg_type) != 0)
		return;

	if (sss_tool_check_reg_value(reg_type, value) != 0)
		return;

	if (sss_tool_get_clp_reg(hwdev, data_type, reg_type, &addr) != 0)
		return;

	cur = sss_chip_read_reg(hwdev->hwif, addr);

	switch (reg_type) {
	case SSS_TOOL_CLP_LEN:
		mask = SSS_TOOL_CLP_MASK(LEN);
		shift = SSS_TOOL_CLP_OFFSET(LEN);
		break;
	case SSS_TOOL_CLP_START_REQ:
		mask = SSS_TOOL_CLP_MASK(START);
		shift = SSS_TOOL_CLP_OFFSET(START);
		break;
	case SSS_TOOL_CLP_READY_RSP:
		mask = SSS_TOOL_CLP_MASK(READY);
		shift = SSS_TOOL_CLP_OFFSET(READY);
		break;
	default:
		return; /* only LEN/START/READY fields are writable here */
	}

	cur &= ~(mask << shift);
	cur |= (value << shift);
	sss_chip_write_reg(hwdev->hwif, addr, cur);
}
/* Poll the CLP response READY bit, then copy the response (LEN dwords)
 * out of the CLP data window into @buf_out.
 * @out_size returns the response length in dwords, not bytes (the caller
 * multiplies by SSS_TOOL_CLP_DATA_UNIT).
 * Clears READY and LEN afterwards to return the channel to the firmware.
 */
static int sss_tool_read_clp_data(struct sss_hwdev *hwdev, void *buf_out, u16 *out_size)
{
	int err;
	u32 reg = SSS_CLP_DATA(RSP);
	u32 ready, delay_cnt;
	u32 *ptr = (u32 *)buf_out;
	u32 temp_out_size = 0;

	err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP,
				    SSS_TOOL_CLP_READY_RSP, &ready);
	if (err)
		return err;

	delay_cnt = 0;
	/* Wait (up to ~2 s: 200 iterations of ~10 ms) for READY to assert. */
	while (ready == 0) {
		usleep_range(9000, 10000); /* sleep 9000 us ~ 10000 us */
		delay_cnt++;
		err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP,
					    SSS_TOOL_CLP_READY_RSP, &ready);
		if (err || delay_cnt > SSS_TOOL_CLP_DELAY_CNT_MAX) {
			tool_err("Fail to read clp delay rsp, timeout delay_cnt: %u\n",
				 delay_cnt);
			return -EINVAL;
		}
	}

	err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP,
				    SSS_TOOL_CLP_LEN, &temp_out_size);
	if (err)
		return err;

	if (temp_out_size > SSS_TOOL_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) {
		tool_err("Invalid temp out size: %u\n", temp_out_size);
		return -EINVAL;
	}

	*out_size = (u16)temp_out_size;
	/* Copy LEN dwords out of the response data window. */
	for (; temp_out_size > 0; temp_out_size--) {
		*ptr = sss_chip_read_reg(hwdev->hwif, reg);
		ptr++;
		/* read 4 bytes every time */
		reg = reg + 4;
	}

	/* Acknowledge the response: drop READY and zero LEN. */
	sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_RSP,
			       SSS_TOOL_CLP_READY_RSP, (u32)0x0);
	sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_RSP, SSS_TOOL_CLP_LEN, (u32)0x0);

	return 0;
}
/* Wait for the CLP request channel to go idle (START == 0), program the
 * request length, raise START, then copy @in_size dwords into the request
 * data window.
 * NOTE(review): START is raised *before* the payload is written;
 * presumably the firmware samples the window only after seeing both START
 * and LEN - confirm against the CLP spec before reordering these writes.
 */
static int sss_tool_write_clp_data(struct sss_hwdev *hwdev, void *buf_in, u16 in_size)
{
	int ret;
	u32 reg = SSS_CLP_DATA(REQ);
	u32 start = 1;
	u32 delay_cnt = 0;
	u32 *ptr = (u32 *)buf_in;
	u16 size_in = in_size;

	ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ,
				    SSS_TOOL_CLP_START_REQ, &start);
	if (ret != 0)
		return ret;

	/* Poll (up to ~2 s) until any in-flight request has been consumed. */
	while (start == 1) {
		usleep_range(9000, 10000); /* sleep 9000 us ~ 10000 us */
		delay_cnt++;
		ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ,
					    SSS_TOOL_CLP_START_REQ, &start);
		if (ret || delay_cnt > SSS_TOOL_CLP_DELAY_CNT_MAX)
			return -EINVAL;
	}

	sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_REQ, SSS_TOOL_CLP_LEN, size_in);
	sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_REQ, SSS_TOOL_CLP_START_REQ, (u32)0x1);

	/* Copy the request payload, one dword per register slot. */
	for (; size_in > 0; size_in--) {
		sss_chip_write_reg(hwdev->hwif, reg, *ptr);
		ptr++;
		reg = reg + sizeof(u32);
	}

	return 0;
}
/* Zero the whole CLP data window (request or response), one dword at a
 * time, so stale data never leaks into the next exchange.
 */
static void sss_tool_clear_clp_data(struct sss_hwdev *hwdev,
				    enum sss_tool_clp_data_type data_type)
{
	u32 addr = (data_type == SSS_TOOL_CLP_REQ) ?
		   SSS_CLP_DATA(REQ) : SSS_CLP_DATA(RSP);
	u32 remaining;

	for (remaining = SSS_TOOL_CLP_MAX_DATA_SIZE; remaining > 0; remaining--) {
		sss_chip_write_reg(hwdev->hwif, addr, 0x0);
		addr += sizeof(u32);
	}
}
/* Build the 64-bit CLP message header: a single-segment, inline-data,
 * acknowledged management message of @msg_len payload bytes for module
 * @mod / command @cmd, tagged with this function's global function id.
 */
static void sss_tool_clp_prepare_header(struct sss_hwdev *hwdev, u64 *header,
					u16 msg_len, u8 mod, enum sss_mgmt_cmd cmd)
{
	struct sss_hwif *hwif = hwdev->hwif;

	*header = SSS_SET_MSG_HEADER(msg_len, MSG_LEN) |
		  SSS_SET_MSG_HEADER(mod, MODULE) |
		  SSS_SET_MSG_HEADER(msg_len, SEG_LEN) |
		  SSS_SET_MSG_HEADER(0, NO_ACK) |
		  SSS_SET_MSG_HEADER(SSS_INLINE_DATA, DATA_TYPE) |
		  SSS_SET_MSG_HEADER(0, SEQID) |
		  SSS_SET_MSG_HEADER(SSS_ADM_MSG_AEQ_ID, AEQ_ID) |
		  SSS_SET_MSG_HEADER(SSS_LAST_SEG, LAST) |
		  SSS_SET_MSG_HEADER(0, DIRECTION) |
		  SSS_SET_MSG_HEADER(cmd, CMD) |
		  SSS_SET_MSG_HEADER(hwif->attr.func_id, SRC_GLB_FUNC_ID) |
		  SSS_SET_MSG_HEADER(0, MSG_ID);
}
/* Send a management message over the CLP channel and read back the reply.
 * Only valid on a PF with CLP support; serialized by clp_msg_lock.
 *
 * @buf_in/@in_size:  request payload (copied behind a message header).
 * @buf_out/@out_size: response payload buffer; *out_size is the exact
 *                     payload length the caller expects.
 * Returns 0 on success, -EINVAL/-EPERM on parameter or channel errors.
 */
int sss_tool_send_clp_msg(struct sss_hwdev *hwdev, u8 mod, u16 cmd, const void *buf_in,
			  u16 in_size, void *buf_out, u16 *out_size)
{
	struct sss_clp_pf_to_mgmt *clp_msg;
	u64 header;
	u16 size;
	u8 *msg_buf;
	int ret;

	if (!hwdev || SSS_GET_FUNC_TYPE(hwdev) == SSS_FUNC_TYPE_VF)
		return -EINVAL;

	if (!hwdev->chip_present_flag || !SSS_SUPPORT_CLP(hwdev))
		return -EPERM;

	clp_msg = hwdev->clp_pf_to_mgmt;
	if (!clp_msg)
		return -EPERM;

	msg_buf = clp_msg->clp_msg_buf;

	/* 4 bytes alignment */
	size = SSS_TOOL_CLP_DATA_REAL_SIZE(in_size, header);
	if (size > SSS_TOOL_CLP_MAX_DATA_SIZE) {
		tool_err("Invalid data size: %u\n", size);
		return -EINVAL;
	}

	down(&clp_msg->clp_msg_lock);

	ret = sss_tool_check_clp_init_status(hwdev);
	if (ret) {
		tool_err("Fail to check clp init status\n");
		goto out_unlock;
	}

	sss_tool_clear_clp_data(hwdev, SSS_TOOL_CLP_RSP);
	sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_RSP,
			       SSS_TOOL_CLP_READY_RSP, 0x0);

	/* Send request: header followed by the payload. */
	memset(msg_buf, 0x0, SSS_TOOL_CLP_INPUT_BUF_LEN);
	sss_tool_clp_prepare_header(hwdev, &header, in_size, mod, cmd);

	memcpy(msg_buf, &header, sizeof(header));
	msg_buf += sizeof(header);
	memcpy(msg_buf, buf_in, in_size);

	msg_buf = clp_msg->clp_msg_buf;

	sss_tool_clear_clp_data(hwdev, SSS_TOOL_CLP_REQ);
	ret = sss_tool_write_clp_data(hwdev, clp_msg->clp_msg_buf, size);
	if (ret) {
		tool_err("Fail to send clp request\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Get response (size comes back in dwords, converted below). */
	msg_buf = clp_msg->clp_msg_buf;
	memset(msg_buf, 0x0, SSS_TOOL_CLP_INPUT_BUF_LEN);
	ret = sss_tool_read_clp_data(hwdev, msg_buf, &size);
	sss_tool_clear_clp_data(hwdev, SSS_TOOL_CLP_RSP);
	if (ret) {
		tool_err("Fail to read clp response\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	size = (u16)((size * SSS_TOOL_CLP_DATA_UNIT) & 0xffff);
	if (size <= sizeof(header) || size > SSS_TOOL_CLP_INPUT_BUF_LEN) {
		tool_err("Invalid response size: %u", size);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (size != *out_size + sizeof(header)) {
		tool_err("Invalid size:%u, out_size: %u\n", size, *out_size);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* BUGFIX: copy only the payload (*out_size bytes). The original
	 * copied 'size' bytes (payload + header length), overrunning
	 * buf_out by sizeof(header) bytes.
	 */
	memcpy(buf_out, msg_buf + sizeof(header), *out_size);
	ret = 0;

out_unlock:
	up(&clp_msg->clp_msg_lock);

	return ret;
}
/* Read one 32-bit CSR at @addr on node @dest via the ADM message channel.
 * Returns 0 and stores the value in *val on success.
 */
int sss_tool_adm_csr_rd32(struct sss_hwdev *hwdev, u8 dest, u32 addr, u32 *val)
{
	struct sss_tool_csr_request_adm_data req = {0};
	u32 reg_val = 0;
	int err;

	if (!hwdev || !val)
		return -EFAULT;

	if (!SSS_SUPPORT_ADM_MSG(hwdev))
		return -EPERM;

	/* Build the big-endian read descriptor. */
	req.dw0 = 0;
	req.dw1.bits.operation_id = SSS_TOOL_ADM_CSR_READ;
	req.dw1.bits.need_response = SSS_TOOL_ADM_CSR_NEED_RESP_DATA;
	req.dw1.bits.data_size = SSS_TOOL_ADM_CSR_DATA_SZ_32;
	req.dw1.val32 = cpu_to_be32(req.dw1.val32);
	req.dw2.bits.csr_addr = addr;
	req.dw2.val32 = cpu_to_be32(req.dw2.val32);

	err = sss_adm_msg_read_ack(hwdev, dest, (u8 *)(&req),
				   sizeof(req), &reg_val, 0x4);
	if (err) {
		tool_err("Fail to read 32 bit csr, dest %u addr 0x%x, ret: 0x%x\n",
			 dest, addr, err);
		return err;
	}

	*val = reg_val;

	return 0;
}
/* Write @val into the 32-bit CSR at @addr on node @dest via the ADM
 * message channel (no response data requested).
 */
int sss_tool_adm_csr_wr32(struct sss_hwdev *hwdev, u8 dest, u32 addr, u32 val)
{
	struct sss_tool_csr_request_adm_data req = {0};
	int err;

	if (!hwdev)
		return -EFAULT;

	if (!SSS_SUPPORT_ADM_MSG(hwdev))
		return -EPERM;

	/* Build the big-endian write descriptor; for a 32-bit write the
	 * high data word is all-ones per the descriptor contract.
	 */
	req.dw1.bits.operation_id = SSS_TOOL_ADM_CSR_WRITE;
	req.dw1.bits.need_response = SSS_TOOL_ADM_CSR_NO_RESP_DATA;
	req.dw1.bits.data_size = SSS_TOOL_ADM_CSR_DATA_SZ_32;
	req.dw1.val32 = cpu_to_be32(req.dw1.val32);
	req.dw2.bits.csr_addr = addr;
	req.dw2.val32 = cpu_to_be32(req.dw2.val32);
	req.csr_write_data_h = 0xffffffff;
	req.csr_write_data_l = val;

	err = sss_adm_msg_write_nack(hwdev, dest, (u8 *)(&req), sizeof(req));
	if (err) {
		tool_err("Fail to write 32 bit csr! dest %u addr 0x%x val 0x%x\n",
			 dest, addr, val);
		return err;
	}

	return 0;
}
/* Tool handler: read @rd_len bytes of CSR space (dword by dword) starting
 * at the user-supplied address into @buf_out.
 * @buf_in carries a struct sss_tool_csr_read; *out_size must equal rd_len
 * and rd_len must be dword-aligned.
 */
static int sss_tool_adm_csr_read(void *hwdev, struct sss_tool_msg *tool_msg,
				 void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
	int ret = 0;
	u32 cnt;
	u32 offset = 0;
	u32 i;
	struct sss_tool_csr_read *rd_msg = (struct sss_tool_csr_read *)buf_in;
	u8 node_id = (u8)tool_msg->mpu_cmd.mod;
	u32 rd_len;
	u32 rd_addr;

	/* BUGFIX: validate the buffers BEFORE dereferencing rd_msg. The
	 * original read rd_msg->rd_len/addr in the declarations, i.e.
	 * before the NULL check, dereferencing a possible NULL pointer.
	 */
	if (!buf_in || !buf_out || in_size != sizeof(*rd_msg))
		return -EINVAL;

	rd_len = rd_msg->rd_len;
	rd_addr = rd_msg->addr;
	if (*out_size != rd_len || rd_len % SSS_TOOL_DW_WIDTH != 0)
		return -EINVAL;

	cnt = rd_len / SSS_TOOL_DW_WIDTH;
	for (i = 0; i < cnt; i++) {
		ret = sss_tool_adm_csr_rd32(hwdev, node_id, rd_addr + offset,
					    (u32 *)(((u8 *)buf_out) + offset));
		if (ret) {
			tool_err("Fail to read csr, err: %d, node_id: %u, csr addr: 0x%08x\n",
				 ret, node_id, rd_addr + offset);
			return ret;
		}
		offset += SSS_TOOL_DW_WIDTH;
	}
	*out_size = rd_len;

	return ret;
}
/* Tool handler: write @rd_len bytes of user data (dword by dword) into
 * CSR space starting at the user-supplied address.
 * @buf_in carries a struct sss_tool_csr_write whose ->data points at
 * user memory; rd_len must be dword-aligned.
 */
static int sss_tool_adm_csr_write(void *hwdev, struct sss_tool_msg *tool_msg,
				  void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
	int ret = 0;
	u32 cnt = 0;
	u32 offset = 0;
	u32 i;
	struct sss_tool_csr_write *wr_msg = (struct sss_tool_csr_write *)buf_in;
	u8 node_id = (u8)tool_msg->mpu_cmd.mod;
	u32 rd_len;
	u32 rd_addr;
	u8 *data = NULL;

	/* BUGFIX: validate buf_in BEFORE dereferencing wr_msg. The original
	 * read wr_msg->rd_len/addr in the declarations, before the check.
	 */
	if (!buf_in || in_size != sizeof(*wr_msg))
		return -EINVAL;

	rd_len = wr_msg->rd_len;
	rd_addr = wr_msg->addr;
	if (rd_len % SSS_TOOL_DW_WIDTH != 0)
		return -EINVAL;

	data = kzalloc(rd_len, GFP_KERNEL);
	if (!data)
		return -EFAULT;

	if (copy_from_user(data, (void *)wr_msg->data, rd_len)) {
		tool_err("Fail to copy information from user\n");
		kfree(data);
		return -EFAULT;
	}

	cnt = rd_len / SSS_TOOL_DW_WIDTH;
	for (i = 0; i < cnt; i++) {
		ret = sss_tool_adm_csr_wr32(hwdev, node_id, rd_addr + offset,
					    *((u32 *)(data + offset)));
		if (ret) {
			/* BUGFIX: the original passed (rd_addr + offset,
			 * node_id) in swapped order versus the format string.
			 */
			tool_err("Fail to write csr, ret: %d, node_id: %u, csr addr: 0x%08x\n",
				 ret, node_id, rd_addr + offset);
			kfree(data);
			return ret;
		}
		offset += SSS_TOOL_DW_WIDTH;
	}

	*out_size = 0;
	kfree(data);

	return ret;
}
/* Route a tool message to the management CPU over the channel selected
 * by the user: mailbox, CLP, raw ADM CSR access, or the ADM message path
 * (which falls back to the mailbox on the SPU host interface).
 */
int sss_tool_msg_to_mpu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg,
			void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
	int ret;
	u16 cmd = tool_msg->mpu_cmd.cmd;
	enum sss_mod_type mod = (enum sss_mod_type)tool_msg->mpu_cmd.mod;
	u32 timeout = sss_tool_get_timeout_val(mod, cmd);
	void *hwdev = hal_dev->hwdev;

	switch (tool_msg->mpu_cmd.channel) {
	case SSS_TOOL_CHANNEL_MBOX:
	case SSS_TOOL_CHANNEL_CLP:
		if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_MBOX)
			ret = sss_sync_mbx_send_msg(hwdev, mod, cmd, buf_in, (u16)in_size,
						    buf_out, (u16 *)out_size, timeout,
						    SSS_CHANNEL_DEFAULT);
		else
			ret = sss_tool_send_clp_msg(hwdev, mod, cmd, buf_in, (u16)in_size,
						    buf_out, (u16 *)out_size);
		if (ret) {
			tool_err("Fail to send msg to mgmt cpu, mod: %d, cmd: %u\n", mod, cmd);
			return ret;
		}
		break;

	case SSS_TOOL_CHANNEL_ADM_MSG_BYPASS:
		if (tool_msg->mpu_cmd.cmd == SSS_TOOL_ADM_MSG_WRITE)
			return sss_tool_adm_csr_write(hwdev, tool_msg, buf_in, in_size,
						      buf_out, out_size);

		ret = sss_tool_adm_csr_read(hwdev, tool_msg, buf_in, in_size,
					    buf_out, out_size);
		break;

	case SSS_TOOL_CHANNEL_ADM_MSG_TO_MPU:
		if (SSS_GET_HWIF_PCI_INTF_ID(SSS_TO_HWIF(hwdev)) != SSS_SPU_HOST_ID)
			ret = sss_sync_send_adm_msg(hwdev, mod, cmd, buf_in, (u16)in_size,
						    buf_out, (u16 *)out_size, timeout);
		else
			ret = sss_sync_mbx_send_msg(hwdev, mod, cmd, buf_in, (u16)in_size,
						    buf_out, (u16 *)out_size, timeout,
						    SSS_CHANNEL_DEFAULT);
		if (ret) {
			tool_err("Fail to send adm msg to mgmt cpu, mod: %d, cmd: %u\n",
				 mod, cmd);
			return ret;
		}
		break;

	default:
		tool_err("Invalid channel %d\n", tool_msg->mpu_cmd.channel);
		return -EINVAL;
	}

	return ret;
}
/* Route a tool message to the NPU over the control queue, using either
 * the direct-reply or the detailed-reply ctrlq primitive as requested.
 */
int sss_tool_msg_to_npu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg,
			void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
	u8 cmd = tool_msg->npu_cmd.cmd;
	enum sss_mod_type mod = (enum sss_mod_type)tool_msg->npu_cmd.mod;
	int err;

	if (tool_msg->npu_cmd.direct_resp) {
		err = sss_ctrlq_direct_reply(hal_dev->hwdev, mod, cmd, buf_in,
					     buf_out, 0, SSS_CHANNEL_DEFAULT);
		if (err)
			tool_err("Fail to send direct ctrlq, ret: %d\n", err);
		return err;
	}

	err = sss_ctrlq_sync_cmd_detail_reply(hal_dev->hwdev, mod, cmd, buf_in, buf_out,
					      NULL, 0, SSS_CHANNEL_DEFAULT);
	if (err)
		tool_err("Fail to send detail ctrlq, ret: %d\n", err);

	return err;
}

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

#ifndef SSS_TOOL_CHIP_H
#define SSS_TOOL_CHIP_H

#include "sss_hw.h"
#include "sss_tool_comm.h"
#include "sss_tool_hw.h"

/* Forward a nictool request to the management CPU (MPU) via the channel
 * selected in tool_msg->mpu_cmd (mailbox / CLP / ADM message / ADM CSR).
 */
int sss_tool_msg_to_mpu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg,
			void *buf_in, u32 in_size, void *buf_out, u32 *out_size);

/* Forward a nictool request to the NPU over the control queue. */
int sss_tool_msg_to_npu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg,
			void *buf_in, u32 in_size, void *buf_out, u32 *out_size);
#endif

View File

@ -0,0 +1,212 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

#ifndef SSS_TOOL_HW_H
#define SSS_TOOL_HW_H

/* ioctl command number reserved for the nictool message interface. */
#define SSS_TOOL_CMD_TYPE (0x18)

/* Max number of PF devices tracked per card. */
#define SSS_TOOL_PF_DEV_MAX 32

/* Indicates the maximum number of interrupts that can be recorded.
 * Subsequent interrupts are not recorded in FFM.
 */
#define SSS_TOOL_FFM_RECORD_MAX 64

/* Per-card PF-info table size and bus-info string length. */
#define SSS_TOOL_PF_INFO_MAX (16)
#define SSS_TOOL_BUSINFO_LEN (32)

/* Size of the chip fault statistics area, in bytes. */
#define SSS_TOOL_CHIP_FAULT_SIZE (110 * 1024)
/* Max size of a single driver-level transfer buffer, in bytes. */
#define SSS_TOOL_DRV_BUF_SIZE_MAX 4096

/* dbgtool command type */
/* You can add commands as required. The dbgtool command can be
 * used to invoke all interfaces of the kernel-mode x86 driver.
 */
/* Legacy dbgtool ioctl sub-commands. */
enum sss_tool_dbg_cmd {
	SSS_TOOL_DBG_CMD_API_RD = 0,
	SSS_TOOL_DBG_CMD_API_WR,
	SSS_TOOL_DBG_CMD_FFM_RD,
	SSS_TOOL_DBG_CMD_FFM_CLR,
	SSS_TOOL_DBG_CMD_PF_DEV_INFO_GET,
	SSS_TOOL_DBG_CMD_MSG_2_UP,
	SSS_TOOL_DBG_CMD_FREE_MEM,
	SSS_TOOL_DBG_CMD_NUM
};

/* Destination of a tool message. Values at or above
 * SSS_TOOL_MSG_TO_SRV_DRV_BASE map 1:1 onto service driver types.
 */
enum module_name {
	SSS_TOOL_MSG_TO_NPU = 1,
	SSS_TOOL_MSG_TO_MPU,
	SSS_TOOL_MSG_TO_SM,
	SSS_TOOL_MSG_TO_HW_DRIVER,
#define SSS_TOOL_MSG_TO_SRV_DRV_BASE (SSS_TOOL_MSG_TO_HW_DRIVER + 1)
	SSS_TOOL_MSG_TO_NIC_DRIVER = SSS_TOOL_MSG_TO_SRV_DRV_BASE,
	SSS_TOOL_MSG_TO_OVS_DRIVER,
	SSS_TOOL_MSG_TO_ROCE_DRIVER,
	SSS_TOOL_MSG_TO_TOE_DRIVER,
	SSS_TOOL_MSG_TO_IOE_DRIVER,
	SSS_TOOL_MSG_TO_FC_DRIVER,
	SSS_TOOL_MSG_TO_VBS_DRIVER,
	SSS_TOOL_MSG_TO_IPSEC_DRIVER,
	SSS_TOOL_MSG_TO_VIRTIO_DRIVER,
	SSS_TOOL_MSG_TO_MIGRATE_DRIVER,
	SSS_TOOL_MSG_TO_PPA_DRIVER,
	SSS_TOOL_MSG_TO_CUSTOM_DRIVER = SSS_TOOL_MSG_TO_SRV_DRV_BASE + 11,
	SSS_TOOL_MSG_TO_DRIVER_MAX = SSS_TOOL_MSG_TO_SRV_DRV_BASE + 15, /* reserved */
};

/* Sub-commands of the ADM-bypass channel (raw CSR access). */
enum sss_tool_adm_msg_type {
	SSS_TOOL_ADM_MSG_READ,
	SSS_TOOL_ADM_MSG_WRITE
};

/* Shared-memory counter access commands. */
enum sss_tool_sm_cmd_type {
	SSS_TOOL_SM_CMD_RD16 = 1,
	SSS_TOOL_SM_CMD_RD32,
	SSS_TOOL_SM_CMD_RD64_PAIR,
	SSS_TOOL_SM_CMD_RD64,
	SSS_TOOL_SM_CMD_RD32_CLEAR,
	SSS_TOOL_SM_CMD_RD64_PAIR_CLEAR,
	SSS_TOOL_SM_CMD_RD64_CLEAR
};

/* Transport used for an MPU-bound message (see sss_tool_msg_to_mpu). */
enum sss_tool_channel_type {
	SSS_TOOL_CHANNEL_MBOX = 1,
	SSS_TOOL_CHANNEL_ADM_MSG_BYPASS,
	SSS_TOOL_CHANNEL_ADM_MSG_TO_MPU,
	SSS_TOOL_CHANNEL_CLP,
};
/* dbgtool API-command read parameters. */
struct sss_tool_api_cmd_rd {
	u32 pf_id;
	u8 dest;
	u8 *cmd;
	u16 size;
	void *ack;
	u16 ack_size;
};

/* dbgtool API-command write parameters. */
struct sss_tool_api_cmd_wr {
	u32 pf_id;
	u8 dest;
	u8 *cmd;
	u16 size;
};

/* PCI location and BAR0 mapping info reported for one PF. */
struct sss_tool_pf_dev_info {
	u64 bar0_size;
	u8 bus;
	u8 slot;
	u8 func;
	u64 phy_addr;
};

/* One first-fault-memory (FFM) interrupt record. */
struct sss_tool_ffm_intr_info {
	u8 node_id;
	/* error level of the interrupt source */
	u8 err_level;
	/* Classification by interrupt source properties */
	u16 err_type;
	u32 err_csr_addr;
	u32 err_csr_value;
};

/* FFM record with occurrence count and wall-clock timestamp. */
struct sss_tool_ffm_intr_tm_info {
	struct sss_tool_ffm_intr_info intr_info;
	u8 times;
	u8 sec;
	u8 min;
	u8 hour;
	u8 mday;
	u8 mon;
	u16 year;
};

/* All FFM interrupt records collected for one card. */
struct sss_tool_ffm_record_info {
	u32 ffm_num;
	u32 last_err_csr_addr;
	u32 last_err_csr_value;
	struct sss_tool_ffm_intr_tm_info ffm[SSS_TOOL_FFM_RECORD_MAX];
};

/* Per-card dbgtool state: serializing semaphore plus the FFM records. */
struct sss_tool_knl_dbg_info {
	struct semaphore dbgtool_sem;
	struct sss_tool_ffm_record_info *ffm;
};
/* Parameters of a message forwarded to the management CPU. */
struct sss_tool_msg_to_up {
	u8 pf_id;
	u8 mod;
	u8 cmd;
	void *buf_in;
	u16 in_size;
	void *buf_out;
	u16 *out_size;
};

/* dbgtool ioctl argument: per-command parameters plus target chip name. */
struct sss_tool_dbg_param {
	union {
		struct sss_tool_api_cmd_rd api_rd;
		struct sss_tool_api_cmd_wr api_wr;
		struct sss_tool_pf_dev_info *dev_info;
		struct sss_tool_ffm_record_info *ffm_rd;
		struct sss_tool_msg_to_up msg2up;
	} param;
	char chip_name[16];
};

/* Identity of one PF as exposed to the tool. */
struct sss_tool_pf {
	char name[IFNAMSIZ];
	char bus_info[SSS_TOOL_BUSINFO_LEN];
	u32 pf_type;
};

/* All PFs belonging to one card. */
struct sss_tool_card_info {
	struct sss_tool_pf pf[SSS_TOOL_PF_INFO_MAX];
	u32 pf_num;
};

/* PF lookup result: valid flag plus the PF index. */
struct sss_tool_pf_info {
	u32 valid;
	u32 pf_id;
};

/* Windowed snapshot of the chip fault statistics buffer. */
struct sss_tool_cmd_chip_fault_stats {
	u32 offset;
	u8 chip_fault_stats[SSS_TOOL_DRV_BUF_SIZE_MAX];
};

/* NPU command descriptor embedded in struct sss_tool_msg. */
struct sss_tool_npu_msg {
	u32 mod : 8;
	u32 cmd : 8;
	u32 ack_type : 3;
	u32 direct_resp : 1;
	u32 len : 12;
};

/* MPU command descriptor embedded in struct sss_tool_msg. */
struct sss_tool_mpu_msg {
	u32 channel : 8;
	u32 mod : 8;
	u32 cmd : 16;
};

/* ioctl payload exchanged with the user-space nictool utility. */
struct sss_tool_msg {
	char device_name[IFNAMSIZ];
	u32 module;
	union {
		u32 msg_formate; /* for driver */
		struct sss_tool_npu_msg npu_cmd;
		struct sss_tool_mpu_msg mpu_cmd;
	};
	u32 timeout; /* for mpu/npu cmd */
	u32 func_id;
	u32 buf_in_size;
	u32 buf_out_size;
	void *in_buf;
	void *out_buf;
	int bus_num;
	u8 port_id;
	u8 rsvd1[3];
	u32 rsvd2[4];
};

#endif /* SSS_TOOL_HW_H */

View File

@ -0,0 +1,736 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt
#include <net/sock.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/time.h>
#include "sss_adapter_mgmt.h"
#include "sss_linux_kernel.h"
#include "sss_hw.h"
#include "sss_tool_comm.h"
#include "sss_tool_hw.h"
#include "sss_tool.h"
/* Character-device identity of the nictool interface. */
#define SSS_TOOL_DEV_PATH "/dev/sssnic_nictool_dev"
#define SSS_TOOL_DEV_CLASS "sssnic_nictool_class"
#define SSS_TOOL_DEV_NAME "sssnic_nictool_dev"

/* Buffer limits: ctrlq (NPU) buffers are small DMA buffers; all other
 * transfers are bounded by the 2 MiB tool limit.
 */
#define SSS_TOOL_CTRLQ_BUF_SIZE_MAX 2048U
#define SSS_TOOL_MSG_IN_SIZE_MAX (2048 * 1024)
#define SSS_TOOL_MSG_OUT_SIZE_MAX (2048 * 1024)
#define SSS_TOOL_BUF_SIZE_MAX (2048 * 1024)

/* Handler signature for one tool-message destination module. */
typedef int (*sss_tool_deal_handler_fun)(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg,
					 void *in_buf, u32 in_len, void *out_buf, u32 *out_len);

/* Dispatch-table entry binding a destination module to its handler. */
struct sss_tool_deal_handler {
	enum module_name msg_name;
	sss_tool_deal_handler_fun func;
};

/* Global chardev state (reference count usage lives outside this chunk). */
static int g_nictool_ref_cnt;

static dev_t g_dev_id = {0};
static struct class *g_nictool_class;
static struct cdev g_nictool_cdev;

/* Per-card bookkeeping, indexed by card slot. */
static void *g_card_node_array[SSS_TOOL_CARD_MAX] = {0};
void *g_card_va[SSS_TOOL_CARD_MAX] = {0};
u64 g_card_pa[SSS_TOOL_CARD_MAX] = {0};
int g_card_id;	/* slot of the card matched by the most recent lookup */
/* Forward a tool message to the service (NIC/OVS/...) driver mapped from
 * tool_msg->module via that driver's registered ioctl callback.
 * Returns 0 for a version query against an absent driver, -EINVAL on a
 * bad module or missing device, otherwise the callback's return value.
 */
static int sss_tool_msg_to_nic(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg,
			       void *in_buf, u32 in_len, void *out_buf, u32 *out_len)
{
	int ret = -EINVAL;
	void *uld_dev = NULL;
	enum sss_service_type service_type;
	struct sss_uld_info *uld_info = sss_get_uld_info();

	/* BUGFIX: reject modules below the service-driver base before the
	 * subtraction. The unsigned difference would wrap and, once
	 * converted to the (possibly signed) enum, could slip past the
	 * upper-bound check below and index uld_info[] out of bounds.
	 */
	if (tool_msg->module < SSS_TOOL_MSG_TO_SRV_DRV_BASE) {
		tool_err("Invalid input module id: %u\n", tool_msg->module);
		return -EINVAL;
	}

	service_type = tool_msg->module - SSS_TOOL_MSG_TO_SRV_DRV_BASE;
	if (service_type >= SSS_SERVICE_TYPE_MAX) {
		tool_err("Invalid input module id: %u\n", tool_msg->module);
		return -EINVAL;
	}

	uld_dev = sss_get_uld_dev(hal_dev, service_type);
	if (!uld_dev) {
		/* A version query on an absent driver is not an error. */
		if (tool_msg->msg_formate == SSS_TOOL_GET_DRV_VERSION)
			return 0;
		tool_err("Fail to get uld device\n");
		return -EINVAL;
	}

	if (uld_info[service_type].ioctl)
		ret = uld_info[service_type].ioctl(uld_dev, tool_msg->msg_formate,
						   in_buf, in_len, out_buf, out_len);
	sss_uld_dev_put(hal_dev, service_type);

	return ret;
}
/* Release an input buffer allocated by sss_tool_alloc_in_buf():
 * NPU buffers go back to the ctrlq DMA pool, everything else to kfree.
 */
void sss_tool_free_in_buf(void *hwdev, const struct sss_tool_msg *tool_msg, void *in_buf)
{
	if (!in_buf)
		return;

	if (tool_msg->module == SSS_TOOL_MSG_TO_NPU) {
		sss_free_ctrlq_msg_buf(hwdev, in_buf);
		return;
	}

	kfree(in_buf);
}
void sss_tool_free_out_buf(void *hwdev, struct sss_tool_msg *tool_msg,
void *out_buf)
{
if (!out_buf)
return;
if (tool_msg->module == SSS_TOOL_MSG_TO_NPU &&
!tool_msg->npu_cmd.direct_resp)
sss_free_ctrlq_msg_buf(hwdev, out_buf);
else
kfree(out_buf);
}
/* Allocate the kernel-side input buffer for a tool message.
 * NPU messages get a ctrlq DMA buffer (stored as the wrapper pointer,
 * with its ->size pre-set); all other modules get zeroed kernel memory.
 * A zero length is a no-op success.
 */
int sss_tool_alloc_in_buf(void *hwdev, struct sss_tool_msg *tool_msg,
			  u32 in_len, void **in_buf)
{
	if (!in_len)
		return 0;

	if (tool_msg->module == SSS_TOOL_MSG_TO_NPU) {
		struct sss_ctrl_msg_buf *cmd_buf = NULL;

		if (in_len > SSS_TOOL_CTRLQ_BUF_SIZE_MAX) {
			tool_err("Invalid ctrlq in len(%u) more than %u\n",
				 in_len, SSS_TOOL_CTRLQ_BUF_SIZE_MAX);
			return -ENOMEM;
		}

		cmd_buf = sss_alloc_ctrlq_msg_buf(hwdev);
		if (!cmd_buf) {
			tool_err("Fail to alloc ctrlq msg buf\n");
			return -ENOMEM;
		}
		*in_buf = (void *)cmd_buf;
		cmd_buf->size = (u16)in_len;

		return 0;
	}

	if (in_len > SSS_TOOL_MSG_IN_SIZE_MAX) {
		tool_err("Invalid in len(%u) more than %u\n",
			 in_len, SSS_TOOL_MSG_IN_SIZE_MAX);
		return -ENOMEM;
	}

	*in_buf = kzalloc(in_len, GFP_KERNEL);
	if (!(*in_buf)) {
		tool_err("Fail to alloc in buf\n");
		return -ENOMEM;
	}

	return 0;
}
/* Allocate the kernel-side output buffer for a tool message.
 * Non-direct NPU responses use a ctrlq DMA buffer; everything else gets
 * zeroed kernel memory. A zero length is a no-op success.
 */
int sss_tool_alloc_out_buf(void *hwdev, struct sss_tool_msg *tool_msg,
			   u32 out_len, void **out_buf)
{
	bool use_ctrlq_buf;

	if (!out_len) {
		tool_info("out len is 0, need not alloc buf\n");
		return 0;
	}

	use_ctrlq_buf = (tool_msg->module == SSS_TOOL_MSG_TO_NPU) &&
			!tool_msg->npu_cmd.direct_resp;

	if (use_ctrlq_buf) {
		if (out_len > SSS_TOOL_CTRLQ_BUF_SIZE_MAX) {
			tool_err("Invalid ctrlq out len(%u) more than %u\n",
				 out_len, SSS_TOOL_CTRLQ_BUF_SIZE_MAX);
			return -ENOMEM;
		}
		*out_buf = (void *)sss_alloc_ctrlq_msg_buf(hwdev);
	} else {
		if (out_len > SSS_TOOL_MSG_OUT_SIZE_MAX) {
			tool_err("Invalid out len(%u) more than %u\n",
				 out_len, SSS_TOOL_MSG_OUT_SIZE_MAX);
			return -ENOMEM;
		}
		*out_buf = kzalloc(out_len, GFP_KERNEL);
	}

	if (!(*out_buf)) {
		tool_err("Fail to alloc out buf\n");
		return -ENOMEM;
	}

	return 0;
}
/* Copy @out_len response bytes back to the user buffer.
 * Non-direct NPU responses are wrapped in a ctrlq msg buf, so the data
 * actually lives at ->buf inside the wrapper.
 */
int sss_tool_copy_to_user(struct sss_tool_msg *tool_msg,
			  u32 out_len, void *out_buf)
{
	void *src = out_buf;

	if (tool_msg->module == SSS_TOOL_MSG_TO_NPU && !tool_msg->npu_cmd.direct_resp)
		src = ((struct sss_ctrl_msg_buf *)out_buf)->buf;

	return copy_to_user(tool_msg->out_buf, src, out_len) ? -EFAULT : 0;
}
/* Allocate both kernel-side buffers for a tool message and copy the user
 * input payload into the input buffer. On any failure every allocation
 * made so far is released.
 */
static int sss_tool_alloc_buf(void *hwdev, struct sss_tool_msg *tool_msg, u32 in_len,
			      void **in_buf, u32 out_len, void **out_buf)
{
	int ret;
	void *copy_dst = NULL;

	ret = sss_tool_alloc_in_buf(hwdev, tool_msg, in_len, in_buf);
	if (ret) {
		tool_err("Fail to alloc tool msg in buf\n");
		return ret;
	}

	if (in_len) {
		/* BUGFIX: for NPU messages *in_buf is a struct
		 * sss_ctrl_msg_buf wrapper, so the user payload must land in
		 * its ->buf DMA area. The original copied over the wrapper
		 * itself, clobbering it and leaving the DMA area unfilled.
		 * (Also guards the copy when in_len == 0 and *in_buf is NULL.)
		 */
		copy_dst = (tool_msg->module == SSS_TOOL_MSG_TO_NPU) ?
			   ((struct sss_ctrl_msg_buf *)(*in_buf))->buf : *in_buf;
		if (copy_from_user(copy_dst, tool_msg->in_buf, in_len)) {
			tool_err("Fail to copy tool_msg to in buf\n");
			sss_tool_free_in_buf(hwdev, tool_msg, *in_buf);
			return -EFAULT;
		}
	}

	ret = sss_tool_alloc_out_buf(hwdev, tool_msg, out_len, out_buf);
	if (ret) {
		tool_err("Fail to alloc tool msg out buf\n");
		goto alloc_out_buf_err;
	}

	return 0;

alloc_out_buf_err:
	sss_tool_free_in_buf(hwdev, tool_msg, *in_buf);

	return ret;
}
/* Release both tool-message buffers (output first, then input);
 * each helper tolerates a NULL pointer.
 */
static void sss_tool_free_buf(void *hwdev, struct sss_tool_msg *tool_msg,
			      void *in_buf, void *out_buf)
{
	sss_tool_free_out_buf(hwdev, tool_msg, out_buf);
	sss_tool_free_in_buf(hwdev, tool_msg, in_buf);
}
/* Dispatch table from destination module to handler; modules not listed
 * here fall back to sss_tool_msg_to_nic() (see sss_tool_deal_cmd).
 */
const struct sss_tool_deal_handler g_deal_msg_handle[] = {
	{SSS_TOOL_MSG_TO_NPU, sss_tool_msg_to_npu},
	{SSS_TOOL_MSG_TO_MPU, sss_tool_msg_to_mpu},
	{SSS_TOOL_MSG_TO_SM, sss_tool_msg_to_sm},
	{SSS_TOOL_MSG_TO_HW_DRIVER, sss_tool_msg_to_hw},
	{SSS_TOOL_MSG_TO_NIC_DRIVER, sss_tool_msg_to_nic}
};
/* Dispatch a tool message to the handler registered for its module.
 * Anything not present in g_deal_msg_handle is treated as a
 * service-driver message and routed to sss_tool_msg_to_nic().
 */
static int sss_tool_deal_cmd(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg,
			     void *in_buf, u32 in_len, void *out_buf, u32 *out_len)
{
	int idx;
	const int handler_cnt = ARRAY_LEN(g_deal_msg_handle);

	for (idx = 0; idx < handler_cnt; idx++) {
		if (tool_msg->module == g_deal_msg_handle[idx].msg_name)
			return g_deal_msg_handle[idx].func(hal_dev, tool_msg, in_buf,
							   in_len, out_buf, out_len);
	}

	return sss_tool_msg_to_nic(hal_dev, tool_msg, in_buf, in_len, out_buf, out_len);
}
/* Resolve the target adapter for a tool message.
 * Service-driver modules are looked up by device name (per service type);
 * other modules are resolved by chip name first, then by device name.
 * XSFP queries are re-resolved by chip name plus port id.
 * NOTE(review): the XSFP branch overwrites the handle obtained above
 * without releasing it - verify whether the lookup helpers take a
 * reference (the caller later does lld_dev_put) that would leak here.
 */
static struct sss_hal_dev *sss_tool_get_hal_dev_by_msg(struct sss_tool_msg *tool_msg)
{
	struct sss_hal_dev *hal_dev = NULL;

	if (tool_msg->module >= SSS_TOOL_MSG_TO_SRV_DRV_BASE &&
	    tool_msg->module < SSS_TOOL_MSG_TO_DRIVER_MAX &&
	    tool_msg->msg_formate != SSS_TOOL_GET_DRV_VERSION) {
		hal_dev = sss_get_lld_dev_by_dev_name(tool_msg->device_name,
						      tool_msg->module -
						      SSS_TOOL_MSG_TO_SRV_DRV_BASE);
	} else {
		hal_dev = sss_get_lld_dev_by_chip_name(tool_msg->device_name);
		if (!hal_dev)
			hal_dev = sss_get_lld_dev_by_dev_name(tool_msg->device_name,
							      SSS_SERVICE_TYPE_MAX);
	}

	if (tool_msg->module == SSS_TOOL_MSG_TO_NIC_DRIVER &&
	    (tool_msg->msg_formate == SSS_TOOL_GET_XSFP_INFO ||
	     tool_msg->msg_formate == SSS_TOOL_GET_XSFP_PRESENT))
		hal_dev = sss_get_lld_dev_by_chip_and_port(tool_msg->device_name,
							   tool_msg->port_id);

	return hal_dev;
}
static int sss_tool_check_msg_valid(struct sss_tool_msg *tool_msg)
{
if (tool_msg->buf_out_size > SSS_TOOL_BUF_SIZE_MAX ||
tool_msg->buf_in_size > SSS_TOOL_BUF_SIZE_MAX) {
tool_err("Invalid in buf len: %u or out buf len: %u\n",
tool_msg->buf_in_size, tool_msg->buf_out_size);
return -EFAULT;
}
return 0;
}
/* Service one nictool message ioctl: copy and validate the request,
 * resolve the target device (taking a hold released via lld_dev_put),
 * allocate kernel buffers, dispatch the command and copy the response
 * back to user space.
 */
static long sss_tool_msg_ioctl(unsigned long arg)
{
	int ret = 0;
	u32 in_len = 0;
	u32 expect_out_len = 0;
	u32 out_len = 0;
	void *in_buf = NULL;
	void *out_buf = NULL;
	struct sss_hal_dev *hal_dev = NULL;
	struct sss_tool_msg tool_msg = {0};

	if (copy_from_user(&tool_msg, (void *)arg, sizeof(tool_msg))) {
		tool_err("Fail to copy msg from user space\n");
		return -EFAULT;
	}

	if (sss_tool_check_msg_valid(&tool_msg)) {
		tool_err("Fail to check msg valid\n");
		return -EFAULT;
	}

	tool_msg.device_name[IFNAMSIZ - 1] = '\0';
	expect_out_len = tool_msg.buf_out_size;
	in_len = tool_msg.buf_in_size;

	hal_dev = sss_tool_get_hal_dev_by_msg(&tool_msg);
	if (!hal_dev) {
		if (tool_msg.msg_formate != SSS_TOOL_DEV_NAME_TEST)
			tool_err("Fail to find device %s for module %d\n",
				 tool_msg.device_name, tool_msg.module);
		return -ENODEV;
	}

	/* BUGFIX: the name-test probe must release the device hold taken
	 * by the lookup above; the original returned 0 directly here and
	 * leaked the reference on every probe.
	 */
	if (tool_msg.msg_formate == SSS_TOOL_DEV_NAME_TEST) {
		ret = 0;
		goto out_free_lock;
	}

	ret = sss_tool_alloc_buf(hal_dev->hwdev, &tool_msg,
				 in_len, &in_buf, expect_out_len, &out_buf);
	if (ret) {
		tool_err("Fail to alloc cmd buf\n");
		goto out_free_lock;
	}

	out_len = expect_out_len;

	ret = sss_tool_deal_cmd(hal_dev, &tool_msg, in_buf, in_len, out_buf, &out_len);
	if (ret) {
		tool_err("Fail to execute cmd, module: %u, ret: %d.\n", tool_msg.module, ret);
		goto out_free_buf;
	}

	if (out_len > expect_out_len) {
		ret = -EFAULT;
		tool_err("Fail to execute cmd, expected out len from user: %u, out len: %u\n",
			 expect_out_len, out_len);
		goto out_free_buf;
	}

	ret = sss_tool_copy_to_user(&tool_msg, out_len, out_buf);
	if (ret)
		tool_err("Fail to copy return information to user space\n");

out_free_buf:
	sss_tool_free_buf(hal_dev->hwdev, &tool_msg, in_buf, out_buf);

out_free_lock:
	lld_dev_put(hal_dev);

	return (long)ret;
}
/* Copy the card's FFM interrupt records out to the user-provided buffer. */
static long sss_tool_knl_ffm_info_rd(struct sss_tool_dbg_param *dbg_param,
				     struct sss_tool_knl_dbg_info *dbg_info)
{
	unsigned long left;

	left = copy_to_user(dbg_param->param.ffm_rd, dbg_info->ffm,
			    (unsigned int)sizeof(*dbg_param->param.ffm_rd));
	if (!left)
		return 0;

	tool_err("Fail to copy ffm_info to user space\n");

	return -EFAULT;
}
/* Find the registered card whose chip name matches @chip_name.
 * On success remembers the matching slot in the global g_card_id and
 * returns the node; returns NULL when no card matches.
 */
static struct sss_card_node *sss_tool_find_card_node(char *chip_name)
{
	struct sss_card_node *node = NULL;
	int slot;

	for (slot = 0; slot < SSS_TOOL_CARD_MAX; slot++) {
		node = (struct sss_card_node *)g_card_node_array[slot];
		if (node && !strncmp(chip_name, node->chip_name, IFNAMSIZ))
			break;
	}

	if (slot == SSS_TOOL_CARD_MAX || !node)
		return NULL;

	g_card_id = slot;

	return node;
}
/*
 * Handle the debug-tool ioctl family: copy the request from user space,
 * look up the target card and serialize the operation on its semaphore.
 */
static long sss_tool_dbg_ioctl(unsigned int cmd_type, unsigned long arg)
{
	struct sss_tool_knl_dbg_info *dbg_info = NULL;
	struct sss_card_node *card_node = NULL;
	struct sss_tool_dbg_param param = {0};
	long ret;

	if (copy_from_user(&param, (void *)arg, sizeof(param))) {
		tool_err("Fail to copy msg param from user\n");
		return -EFAULT;
	}

	/* Pin the chip-node list while the card's dbg info is in use. */
	sss_hold_chip_node();

	card_node = sss_tool_find_card_node(param.chip_name);
	if (!card_node) {
		sss_put_chip_node();
		tool_err("Fail to find card node %s\n", param.chip_name);
		return -EFAULT;
	}

	dbg_info = (struct sss_tool_knl_dbg_info *)card_node->dbgtool_info;

	/* One debug command at a time per card. */
	down(&dbg_info->dbgtool_sem);

	if (cmd_type == SSS_TOOL_DBG_CMD_FFM_RD) {
		ret = sss_tool_knl_ffm_info_rd(&param, dbg_info);
	} else if (cmd_type == SSS_TOOL_DBG_CMD_MSG_2_UP) {
		/* Recognized but intentionally unimplemented command. */
		tool_info("cmd(0x%x) not suppose.\n", cmd_type);
		ret = 0;
	} else {
		tool_err("Fail to execute cmd(0x%x) ,it is not support\n", cmd_type);
		ret = -EFAULT;
	}

	up(&dbg_info->dbgtool_sem);
	sss_put_chip_node();

	return ret;
}
/* char-device release: no per-open state to tear down. */
static int sss_tool_release(struct inode *pnode, struct file *pfile)
{
	return 0;
}
/* char-device open: no per-open state to set up. */
static int sss_tool_open(struct inode *pnode, struct file *pfile)
{
	return 0;
}
/* read() is not supported on this device; always reports zero bytes. */
static ssize_t sss_tool_read(struct file *pfile, char __user *ubuf,
			     size_t size, loff_t *ppos)
{
	return 0;
}
/* write() is not supported on this device; always reports zero bytes. */
static ssize_t sss_tool_write(struct file *pfile, const char __user *ubuf,
			      size_t size, loff_t *ppos)
{
	return 0;
}
/* ioctl entry point: dispatch to the msg or debug handler by command nr. */
static long sss_tool_unlocked_ioctl(struct file *pfile,
				    unsigned int cmd, unsigned long arg)
{
	unsigned int nr = _IOC_NR(cmd);

	return (nr == SSS_TOOL_CMD_TYPE) ?
		sss_tool_msg_ioctl(arg) : sss_tool_dbg_ioctl(nr, arg);
}
/*
 * mmap handler: expose the per-card reserved adm pages (or an explicitly
 * requested physical offset) to the caller as an uncached mapping.
 */
static int sss_tool_mem_mmap(struct file *filp, struct vm_area_struct *mem_area)
{
	unsigned long mem_size = mem_area->vm_end - mem_area->vm_start;
	phys_addr_t offset = (phys_addr_t)mem_area->vm_pgoff << PAGE_SHIFT;
	phys_addr_t phy_addr;

	/* Refuse requests larger than the reserved region. */
	if (mem_size > SSS_TOOL_MEM_MAP_SIZE) {
		tool_err("Fail to map mem, mem_size :%ld, alloc size: %ld\n",
			 mem_size, SSS_TOOL_MEM_MAP_SIZE);
		return -EAGAIN;
	}

	/* A zero offset means "map the current card's adm memory". */
	phy_addr = offset ? offset : g_card_pa[g_card_id];
	if (!phy_addr) {
		tool_err("Fail to map mem, card_id = %d phy_addr is 0\n", g_card_id);
		return -EAGAIN;
	}

	/* Uncached mapping: the area is shared with user-space tooling. */
	mem_area->vm_page_prot = pgprot_noncached(mem_area->vm_page_prot);
	if (remap_pfn_range(mem_area, mem_area->vm_start, (phy_addr >> PAGE_SHIFT),
			    mem_size, mem_area->vm_page_prot)) {
		tool_err("Fail to remap pfn range.\n");
		return -EAGAIN;
	}

	return 0;
}
/* Character-device entry points for the nictool management node. */
static const struct file_operations sss_tool_file_ops = {
	.owner = THIS_MODULE,
	.release = sss_tool_release,
	.open = sss_tool_open,
	.read = sss_tool_read,
	.write = sss_tool_write,
	.unlocked_ioctl = sss_tool_unlocked_ioctl,
	.mmap = sss_tool_mem_mmap,
};
/*
 * Allocate a debug-info container together with its embedded FFM record
 * buffer. Returns NULL on any allocation failure. @hwdev is unused here.
 */
static struct sss_tool_knl_dbg_info *sss_tool_alloc_dbg_info(void *hwdev)
{
	struct sss_tool_knl_dbg_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;

	info->ffm = kzalloc(sizeof(*info->ffm), GFP_KERNEL);
	if (info->ffm)
		return info;

	tool_err("Fail to alloc ffm_record_info\n");
	kfree(info);
	return NULL;
}
/* Free a debug-info container and its FFM record buffer. */
static void sss_tool_free_dbg_info(struct sss_tool_knl_dbg_info *dbg_info)
{
	kfree(dbg_info->ffm);
	kfree(dbg_info);
}
/*
 * Parse the numeric node id out of a chip name ("<SSS_CHIP_NAME><n>").
 * Returns 0 on success with the id stored in @node_id, -ENOMEM on a
 * parse failure (error code kept for compatibility with callers).
 */
static int sss_tool_get_node_id(struct sss_card_node *card_node, int *node_id)
{
	int ret;

	ret = sscanf(card_node->chip_name, SSS_CHIP_NAME "%d", node_id);
	/*
	 * sscanf() returns the number of items converted (0 when the name
	 * does not match); the old "ret < 0" test let a non-matching name
	 * fall through with *node_id left uninitialized.
	 */
	if (ret != 1) {
		tool_err("Fail to get card id\n");
		return -ENOMEM;
	}

	return 0;
}
static int sss_tool_add_func_to_card_node(void *hwdev, struct sss_card_node *card_node)
{
int func_id = sss_get_func_id(hwdev);
struct sss_tool_knl_dbg_info *dbg_info = NULL;
int ret;
int node_id;
if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF)
card_node->func_handle_array[func_id] = hwdev;
if (card_node->func_num++)
return 0;
dbg_info = sss_tool_alloc_dbg_info(hwdev);
if (!dbg_info) {
ret = -ENOMEM;
tool_err("Fail to alloc dbg_info\n");
goto alloc_dbg_info_err;
}
card_node->dbgtool_info = dbg_info;
sema_init(&dbg_info->dbgtool_sem, 1);
ret = sss_tool_get_node_id(card_node, &node_id);
if (ret) {
tool_err("Fail to add node to global array\n");
goto get_node_id_err;
}
g_card_node_array[node_id] = card_node;
return 0;
get_node_id_err:
sss_tool_free_dbg_info(dbg_info);
card_node->dbgtool_info = NULL;
alloc_dbg_info_err:
card_node->func_num--;
if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF)
card_node->func_handle_array[func_id] = NULL;
return ret;
}
/*
 * Unregister @hwdev from @card_node; when the last function goes away,
 * also retire the card's global slot, debug info and adm memory.
 */
static void sss_tool_del_func_in_card_node(void *hwdev, struct sss_card_node *card_node)
{
	struct sss_tool_knl_dbg_info *dbg_info = card_node->dbgtool_info;
	int func_id = sss_get_func_id(hwdev);
	int node_id = SSS_TOOL_CARD_MAX;

	if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF)
		card_node->func_handle_array[func_id] = NULL;

	if (--card_node->func_num)
		return;

	/*
	 * The old code ignored the return value and then read node_id
	 * uninitialized on parse failure; keep the out-of-range sentinel
	 * so the bounds checks below skip the global array in that case.
	 */
	if (sss_tool_get_node_id(card_node, &node_id))
		node_id = SSS_TOOL_CARD_MAX;

	if (node_id >= 0 && node_id < SSS_TOOL_CARD_MAX)
		g_card_node_array[node_id] = NULL;

	sss_tool_free_dbg_info(dbg_info);
	card_node->dbgtool_info = NULL;

	if (node_id >= 0 && node_id < SSS_TOOL_CARD_MAX)
		(void)sss_tool_free_card_mem(node_id);
}
/*
 * Register the nictool character device: chrdev region, device class,
 * cdev, and device node — unwinding in reverse order on any failure.
 */
static int sss_tool_create_dev(void)
{
	int ret;
	struct device *pdevice = NULL;

	ret = alloc_chrdev_region(&g_dev_id, 0, 1, SSS_TOOL_DEV_NAME);
	if (ret) {
		tool_err("Fail to alloc sssnic_nictool_dev region(0x%x)\n", ret);
		return ret;
	}

	g_nictool_class = class_create(THIS_MODULE, SSS_TOOL_DEV_CLASS);
	if (IS_ERR(g_nictool_class)) {
		tool_err("Fail to create sssnic_nictool_class\n");
		ret = -EFAULT;
		goto create_class_err;
	}

	cdev_init(&g_nictool_cdev, &sss_tool_file_ops);

	ret = cdev_add(&g_nictool_cdev, g_dev_id, 1);
	if (ret < 0) {
		tool_err("Fail to add sssnic_nictool_dev to operating system (0x%x)\n", ret);
		goto add_cdev_err;
	}

	pdevice = device_create(g_nictool_class, NULL, g_dev_id, NULL, SSS_TOOL_DEV_NAME);
	if (IS_ERR(pdevice)) {
		tool_err("Fail to create sssnic_nictool_dev on operating system\n");
		ret = -EFAULT;
		goto create_device_err;
	}

	tool_info("Success to register sssnic_nictool_dev to system\n");

	return 0;

create_device_err:
	cdev_del(&g_nictool_cdev);

add_cdev_err:
	class_destroy(g_nictool_class);

create_class_err:
	g_nictool_class = NULL;
	unregister_chrdev_region(g_dev_id, 1);

	return ret;
}
/* Tear down the nictool char device in reverse order of creation. */
static void sss_tool_destroy_dev(void)
{
	device_destroy(g_nictool_class, g_dev_id);
	cdev_del(&g_nictool_cdev);
	class_destroy(g_nictool_class);
	g_nictool_class = NULL;
	unregister_chrdev_region(g_dev_id, 1);
	tool_info("Success to unregister sssnic_nictool_dev to system\n");
}
/*
 * Per-function tool init: attach @hwdev to its card node and create the
 * shared nictool char device on first use (refcounted via g_nictool_ref_cnt).
 */
int sss_tool_init(void *hwdev, void *chip_node)
{
	struct sss_card_node *card_node = (struct sss_card_node *)chip_node;
	int ret;

	ret = sss_tool_add_func_to_card_node(hwdev, card_node);
	if (ret) {
		tool_err("Fail to add func to card node\n");
		return ret;
	}

	/* Only the first caller actually creates the device node. */
	if (g_nictool_ref_cnt++) {
		tool_info("sssnic_nictool_dev has already create\n");
		return 0;
	}

	ret = sss_tool_create_dev();
	if (ret) {
		tool_err("Fail to create sssnic_nictool_dev\n");
		goto out;
	}

	return 0;

out:
	/* Roll back both the refcount and the card-node registration. */
	g_nictool_ref_cnt--;
	sss_tool_del_func_in_card_node(hwdev, card_node);

	return ret;
}
/*
 * Per-function tool teardown: detach @hwdev from its card node; destroy
 * the shared char device when the last reference is dropped.
 */
void sss_tool_uninit(void *hwdev, void *chip_node)
{
	struct sss_card_node *chip_info = (struct sss_card_node *)chip_node;

	sss_tool_del_func_in_card_node(hwdev, chip_info);

	/* Guard against unbalanced init/uninit. */
	if (g_nictool_ref_cnt == 0)
		return;

	if (--g_nictool_ref_cnt)
		return;

	if (!g_nictool_class || IS_ERR(g_nictool_class)) {
		tool_err("Fail to uninit sssnictool, tool class is NULL.\n");
		return;
	}

	sss_tool_destroy_dev();
}

View File

@ -0,0 +1,527 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt
#include <net/sock.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/time.h>
#include "sss_linux_kernel.h"
#include "sss_hw.h"
#include "sss_hwdev.h"
#include "sss_tool.h"
#include "sss_csr.h"
#include "sss_adapter_mgmt.h"
#include "sss_mgmt_info.h"
#include "sss_pci_global.h"
#include "sss_hwif_api.h"
/* Handler signature for tool commands aimed at the HW layer. */
typedef int (*sss_tool_hw_cmd_func)(struct sss_hal_dev *hal_dev, const void *buf_in,
				    u32 in_size, void *buf_out, u32 *out_size);

/* Maps one tool command id to its handler. */
struct sss_tool_hw_cmd_handle {
	enum sss_tool_driver_cmd_type cmd_type;
	sss_tool_hw_cmd_func func;
};
/* Report the function type (PF/VF/PPF) of @hal_dev as a u16 in @buf_out. */
int sss_tool_get_func_type(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size,
			   void *buf_out, u32 *out_size)
{
	u16 func_type;

	if (*out_size != sizeof(u16) || !buf_out) {
		tool_err("Invalid out_size from user :%u, expect: %lu\n", *out_size, sizeof(u16));
		return -EFAULT;
	}

	func_type = (u16)sss_get_func_type(SSS_TO_HWDEV(hal_dev));
	*(u16 *)buf_out = func_type;
	return 0;
}
/* Report the global function id of @hal_dev as a u16 in @buf_out. */
int sss_tool_get_func_id(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size,
			 void *buf_out, u32 *out_size)
{
	u16 func_id;

	if (*out_size != sizeof(u16) || !buf_out) {
		tool_err("Invalid out_size from user :%u, expect: %lu\n", *out_size, sizeof(u16));
		return -EFAULT;
	}

	func_id = (u16)sss_get_func_id(SSS_TO_HWDEV(hal_dev));
	*(u16 *)buf_out = func_id;
	return 0;
}
/*
 * Copy the driver's accumulated HW stats into @buf_out and patch in the
 * card-level channel-busy counter.
 */
int sss_tool_get_hw_driver_stats(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size,
				 void *buf_out, u32 *out_size)
{
	struct sss_hwdev *hwdev = hal_dev->hwdev;
	struct sss_card_node *node = NULL;
	struct sss_hw_stats *stats = buf_out;
	struct sss_hw_stats *tmp = stats;

	/* Check hwdev before touching hwdev->chip_node (was deref-then-check). */
	if (!hwdev)
		return -EINVAL;
	node = hwdev->chip_node;

	if (*out_size != sizeof(struct sss_hw_stats) || !stats) {
		tool_err("Invalid out_size from user :%u, expect: %lu\n",
			 *out_size, sizeof(struct sss_hw_stats));
		return -EFAULT;
	}

	memcpy(stats, &hwdev->hw_stats, sizeof(struct sss_hw_stats));

	atomic_set(&tmp->nic_ucode_event_stats[SSS_CHN_BUSY],
		   atomic_read(&node->channel_timeout_cnt));

	return 0;
}
/*
 * Clear the driver's HW stats and fault buffer, and restart the channel
 * detect task if a channel timeout had been recorded.
 */
static int sss_tool_clear_hw_driver_stats(struct sss_hal_dev *hal_dev, const void *buf_in,
					  u32 in_size, void *buf_out, u32 *out_size)
{
	struct sss_hwdev *hwdev = hal_dev->hwdev;
	struct sss_card_node *node = hwdev->chip_node;

	/*
	 * Validate the request before mutating state: the old code cleared
	 * all stats first and only then rejected a bad out_size.
	 */
	if (*out_size != sizeof(struct sss_hw_stats)) {
		tool_err("Invalid out_size from user :%u, expect: %lu\n",
			 *out_size, sizeof(struct sss_hw_stats));
		return -EFAULT;
	}

	memset((void *)&hwdev->hw_stats, 0, sizeof(struct sss_hw_stats));
	memset((void *)hwdev->chip_fault_stats, 0, SSS_TOOL_CHIP_FAULT_SIZE);

	if (SSS_SUPPORT_CHANNEL_DETECT(hwdev) && atomic_read(&node->channel_timeout_cnt)) {
		atomic_set(&node->channel_timeout_cnt, 0);
		hwdev->aeq_busy_cnt = 0;
#if !defined(__UEFI__) && !defined(VMWARE)
		queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task,
				   msecs_to_jiffies(SSSNIC_CHANNEL_DETECT_PERIOD));
#endif
	}

	return 0;
}
/* Read the management health-status register and return it as a u32. */
static int sss_tool_get_self_test_result(struct sss_hal_dev *hal_dev,
					 const void *buf_in, u32 in_size,
					 void *buf_out, u32 *out_size)
{
	u32 *result = buf_out;

	if (*out_size != sizeof(u32) || !buf_out) {
		tool_err("Invalid out_size from user :%u, expect: %lu\n",
			 *out_size, sizeof(u32));
		return -EFAULT;
	}

	*result = sss_chip_read_reg(SSS_TO_HWIF(hal_dev->hwdev), SSS_MGMT_HEALTH_STATUS_ADDR);
	return 0;
}
/*
 * Copy up to SSS_TOOL_DRV_BUF_SIZE_MAX bytes of the cached chip fault
 * statistics, starting at @offset, into @chip_fault_stats.
 */
static void sss_tool_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, u32 offset)
{
	u32 size;

	if (offset >= SSS_TOOL_CHIP_FAULT_SIZE) {
		tool_err("Invalid chip offset value: %d\n", offset);
		return;
	}

	/* Clamp the copy to the end of the fault buffer. */
	size = min(SSS_TOOL_DRV_BUF_SIZE_MAX, SSS_TOOL_CHIP_FAULT_SIZE - (int)offset);
	memcpy(chip_fault_stats, ((struct sss_hwdev *)hwdev)->chip_fault_stats
	       + offset, size);
}
/*
 * Tool command: read a window of the chip fault statistics. The input
 * carries the requested offset; the output carries the copied bytes.
 */
static int sss_tool_get_chip_faults_stats(struct sss_hal_dev *hal_dev,
					  const void *buf_in, u32 in_size,
					  void *buf_out, u32 *out_size)
{
	u32 offset = 0;
	struct sss_tool_cmd_chip_fault_stats *info = NULL;

	if (!buf_in || !buf_out || *out_size != sizeof(*info) ||
	    in_size != sizeof(*info)) {
		/* %u: *out_size is u32; %d was a signed/unsigned mismatch. */
		tool_err("Invalid out_size from user: %u, expect: %lu\n", *out_size, sizeof(*info));
		return -EFAULT;
	}

	info = (struct sss_tool_cmd_chip_fault_stats *)buf_in;
	offset = info->offset;

	info = (struct sss_tool_cmd_chip_fault_stats *)buf_out;
	sss_tool_get_chip_fault_stats(hal_dev->hwdev,
				      info->chip_fault_stats, offset);

	return 0;
}
/* Fill @buf_out with the card summary of the device behind @hal_dev. */
static int sss_tool_get_single_card_info(struct sss_hal_dev *hal_dev, const void *buf_in,
					 u32 in_size, void *buf_out, u32 *out_size)
{
	if (buf_out && *out_size == sizeof(struct sss_tool_card_info)) {
		sss_get_card_info(hal_dev->hwdev, buf_out);
		return 0;
	}

	tool_err("Invalid buf out is NULL, or out_size != %lu\n",
		 sizeof(struct sss_tool_card_info));
	return -EINVAL;
}
/* Report 1 when the driver runs in a guest, 0 when it runs on the host. */
static int sss_tool_is_driver_in_vm(struct sss_hal_dev *hal_dev,
				    const void *buf_in, u32 in_size,
				    void *buf_out, u32 *out_size)
{
	u8 in_vm;

	if (!buf_out || (*out_size != sizeof(u8))) {
		tool_err("Invalid parameter, buf_out is NULL or out_size != %lu\n", sizeof(u8));
		return -EINVAL;
	}

	in_vm = sss_is_in_host() ? 0 : 1;
	*((u8 *)buf_out) = in_vm;
	return 0;
}
/* Return the set of present chip ids as a struct sss_card_id. */
static int sss_tool_get_all_chip_id_cmd(struct sss_hal_dev *hal_dev,
					const void *buf_in, u32 in_size,
					void *buf_out, u32 *out_size)
{
	if (*out_size != sizeof(struct sss_card_id) || !buf_out) {
		tool_err("Invalid parameter: out_size %u, expect %lu\n",
			 *out_size, sizeof(struct sss_card_id));
		return -EFAULT;
	}

	sss_get_all_chip_id(buf_out);

	return 0;
}
/*
 * Parse and range-check the card index out of "<SSS_CHIP_NAME><n>".
 * Returns 0 with *id set on success, -EINVAL otherwise.
 */
static int sss_tool_get_card_id(char *dev_name, int *id)
{
	int ret;

	ret = sscanf(dev_name, SSS_CHIP_NAME "%d", id);
	/*
	 * sscanf() reports failure as 0 conversions; the old "ret < 0"
	 * test never fired and left *id uninitialized for a bad name.
	 */
	if (ret != 1) {
		tool_err("Fail to get card id\n");
		return -EINVAL;
	}

	if (*id >= SSS_TOOL_CARD_MAX || *id < 0) {
		tool_err("Invalid chip id %d, out of range: [0-%d]\n", *id, SSS_TOOL_CARD_MAX - 1);
		return -EINVAL;
	}

	return 0;
}
/* Fill bus/slot/function and BAR0 info for every PF slot on a card. */
static void sss_tool_get_pf_dev_info_param(struct sss_tool_pf_dev_info *dev_info, int card_id,
					   void **func_array)
{
	u32 idx;

	for (idx = 0; idx < SSS_TOOL_PF_DEV_MAX; idx++) {
		struct sss_tool_pf_dev_info *entry = &dev_info[idx];
		void *hwdev = func_array[idx];
		struct pci_dev *pdev;

		entry->phy_addr = g_card_pa[card_id];

		if (!hwdev) {
			/* Empty slot: report all-zero PCI coordinates. */
			entry->bar0_size = 0;
			entry->bus = 0;
			entry->slot = 0;
			entry->func = 0;
			continue;
		}

		pdev = (struct pci_dev *)sss_get_pcidev_hdl(hwdev);
		entry->bar0_size = pci_resource_len(pdev, 0);
		entry->bus = pdev->bus->number;
		entry->slot = PCI_SLOT(pdev->devfn);
		entry->func = PCI_FUNC(pdev->devfn);
	}
}
/*
 * Lazily allocate the per-card adm memory (2^SSS_TOOL_PAGE_ORDER pages),
 * mark the pages reserved so they can be mmap()ed, and record the physical
 * address in g_card_pa. Also selects @card_id as the current card.
 */
static int sss_tool_get_card_adm_mem(int card_id)
{
	int i;
	unsigned char *card_va = NULL;

	g_card_id = card_id;

	if (!g_card_va[card_id]) {
		g_card_va[card_id] =
			(void *)__get_free_pages(GFP_KERNEL, SSS_TOOL_PAGE_ORDER);
		if (!g_card_va[card_id]) {
			tool_err("Fail to alloc adm memory for card %d!\n", card_id);
			return -EFAULT;
		}

		memset(g_card_va[card_id], 0, PAGE_SIZE * (1 << SSS_TOOL_PAGE_ORDER));

		g_card_pa[card_id] = virt_to_phys(g_card_va[card_id]);
		if (!g_card_pa[card_id]) {
			tool_err("Invalid phy addr for card %d is 0\n", card_id);
			free_pages((unsigned long)g_card_va[card_id], SSS_TOOL_PAGE_ORDER);
			g_card_va[card_id] = NULL;
			return -EFAULT;
		}

		card_va = g_card_va[card_id];

		/* Reserve each page so remap_pfn_range() may expose it. */
		for (i = 0; i < (1 << SSS_TOOL_PAGE_ORDER); i++) {
			SetPageReserved(virt_to_page(card_va));
			card_va += PAGE_SIZE;
		}
	}

	return 0;
}
/*
 * Report bus/slot/function and BAR0 info for every PF on the card and
 * ensure its adm memory exists for a subsequent mmap().
 */
static int sss_tool_get_pf_dev_info(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size,
				    void *buf_out, u32 *out_size)
{
	int id;
	int ret;
	struct sss_tool_pf_dev_info *info = buf_out;
	struct sss_card_node *node = sss_get_card_node(hal_dev);

	if (!buf_out || *out_size != sizeof(struct sss_tool_pf_dev_info) * SSS_TOOL_PF_DEV_MAX) {
		/* sizeof(*info): the old log printed sizeof(info), a pointer size. */
		tool_err("Invalid param: out_size %u, expect %lu\n",
			 *out_size, sizeof(*info) * SSS_TOOL_PF_DEV_MAX);
		return -EFAULT;
	}

	ret = sss_tool_get_card_id(node->chip_name, &id);
	if (ret)
		return ret;

	sss_tool_get_pf_dev_info_param(info, id, node->func_handle_array);

	ret = sss_tool_get_card_adm_mem(id);
	if (ret) {
		tool_err("Fail to get adm memory for userspace %s\n", node->chip_name);
		return -EFAULT;
	}

	return 0;
}
/*
 * Release the reserved adm pages of card @id (no-op when they were never
 * allocated). Always returns 0.
 */
long sss_tool_free_card_mem(int id)
{
	unsigned char *page_va = g_card_va[id];
	int page;

	if (!page_va)
		return 0;

	/* Un-reserve every page before giving the block back. */
	for (page = 0; page < (1 << SSS_TOOL_PAGE_ORDER); page++) {
		ClearPageReserved(virt_to_page(page_va));
		page_va += PAGE_SIZE;
	}

	free_pages((unsigned long)g_card_va[id], SSS_TOOL_PAGE_ORDER);
	g_card_va[id] = NULL;
	g_card_pa[id] = 0;

	return 0;
}
/* Tool command: drop the adm memory of the card behind @hal_dev. */
static int sss_tool_free_all_card_mem(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size,
				      void *buf_out, u32 *out_size)
{
	struct sss_card_node *node = sss_get_card_node(hal_dev);
	int card_id;
	int ret;

	ret = sss_tool_get_card_id(node->chip_name, &card_id);
	if (ret)
		return ret;

	sss_tool_free_card_mem(card_id);

	return 0;
}
/* Validate output buffer size and chip-name prefix for a card-info query. */
static int sss_tool_check_card_info_param(char *dev_name, const void *buf_out, u32 out_size)
{
	int cmp;

	if (!buf_out || out_size != sizeof(struct sss_card_func_info)) {
		tool_err("Invalid out_size %u, expect %lu\n",
			 out_size, sizeof(struct sss_card_func_info));
		return -EINVAL;
	}

	cmp = memcmp(dev_name, SSS_CHIP_NAME, strlen(SSS_CHIP_NAME));
	if (cmp != 0) {
		tool_err("Invalid chip name %s\n", dev_name);
		return cmp;
	}

	return 0;
}
/*
 * Fill @buf_out (struct sss_card_func_info) for the card behind @hal_dev
 * and ensure its adm memory is allocated; exports the phys addr to user.
 */
static int sss_tool_get_card_func_info(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size,
				       void *buf_out, u32 *out_size)
{
	int ret;
	int id = 0;
	struct sss_card_func_info *info = buf_out;
	struct sss_card_node *node = sss_get_card_node(hal_dev);

	ret = sss_tool_check_card_info_param(node->chip_name, buf_out, *out_size);
	if (ret)
		return ret;

	ret = sss_tool_get_card_id(node->chip_name, &id);
	if (ret)
		return ret;

	sss_get_card_func_info(node->chip_name, info);

	if (!info->pf_num) {
		tool_err("Fail to get card func info, chip name %s\n", node->chip_name);
		return -EFAULT;
	}

	ret = sss_tool_get_card_adm_mem(id);
	if (ret) {
		tool_err("Fail to get adm memory for userspace %s\n", node->chip_name);
		return -EFAULT;
	}

	/* Lets user space mmap() the adm area at this physical address. */
	info->usr_adm_pa = g_card_pa[id];

	return 0;
}
/*
 * Copy the service capability of the function selected by buf_in->func_id
 * into buf_out, under the chip-node reference lock.
 */
static int sss_tool_get_pf_cap_info(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size,
				    void *buf_out, u32 *out_size)
{
	struct sss_hwdev *hwdev = NULL;
	struct sss_card_node *node = sss_get_card_node(hal_dev);
	struct sss_svc_cap_info *in_info = (struct sss_svc_cap_info *)buf_in;
	struct sss_svc_cap_info *out_info = (struct sss_svc_cap_info *)buf_out;

	if (*out_size != sizeof(struct sss_svc_cap_info) ||
	    in_size != sizeof(struct sss_svc_cap_info) ||
	    !buf_in || !buf_out) {
		tool_err("Invalid out_size %u, in_size: %u, expect %lu\n",
			 *out_size, in_size, sizeof(struct sss_svc_cap_info));
		return -EINVAL;
	}

	if (in_info->func_id >= SSS_MAX_FUNC) {
		tool_err("Invalid func id: %u, max_num: %u\n",
			 in_info->func_id, SSS_MAX_FUNC);
		return -EINVAL;
	}

	/* Hold the chip node so the function handle cannot vanish under us. */
	sss_hold_chip_node();

	hwdev = (struct sss_hwdev *)(node->func_handle_array)[in_info->func_id];
	if (!hwdev) {
		sss_put_chip_node();
		return -EINVAL;
	}

	memcpy(&out_info->cap, SSS_TO_SVC_CAP(hwdev), sizeof(struct sss_service_cap));

	sss_put_chip_node();

	return 0;
}
/* Format "<driver version> <build time>" into the caller's buffer. */
static int sss_tool_get_hw_drv_version(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size,
				       void *buf_out, u32 *out_size)
{
	struct sss_tool_drv_version_info *info = buf_out;
	int printed;

	if (!buf_out || *out_size != sizeof(*info)) {
		tool_err("Invalid param, buf_out is NULL or out_size:%u, expect: %lu\n",
			 *out_size, sizeof(*info));
		return -EINVAL;
	}

	printed = snprintf(info->ver, sizeof(info->ver), "%s %s", SSS_DRV_VERSION,
			   __TIME_STR__);

	return (printed < 0) ? -EINVAL : 0;
}
/* Resolve the PF id owning port @buf_in (u32 port id) on this card. */
static int sss_tool_get_pf_id(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size,
			      void *buf_out, u32 *out_size)
{
	struct sss_card_node *node = sss_get_card_node(hal_dev);
	struct sss_tool_pf_info *info = NULL;
	u32 port_id;
	int ret;

	if (!node)
		return -ENODEV;

	if (!buf_out || (*out_size != sizeof(*info)) || !buf_in || in_size != sizeof(port_id)) {
		tool_err("Invalid out_size from user: %u, expect: %lu, in_size:%u\n",
			 *out_size, sizeof(*info), in_size);
		return -EINVAL;
	}

	port_id = *((u32 *)buf_in);
	info = (struct sss_tool_pf_info *)buf_out;

	ret = sss_get_pf_id(node, port_id, &info->pf_id, &info->valid);
	if (ret != 0)
		return ret;

	*out_size = sizeof(*info);

	return 0;
}
/* Dispatch table: tool command id -> HW-layer handler. */
struct sss_tool_hw_cmd_handle g_hw_cmd_handle[] = {
	{SSS_TOOL_FUNC_TYPE, sss_tool_get_func_type},
	{SSS_TOOL_GET_FUNC_IDX, sss_tool_get_func_id},
	{SSS_TOOL_GET_CHIP_INFO, sss_tool_get_card_func_info},
	{SSS_TOOL_GET_DRV_VERSION, sss_tool_get_hw_drv_version},
	{SSS_TOOL_GET_PF_ID, sss_tool_get_pf_id},
	{SSS_TOOL_GET_FUNC_CAP, sss_tool_get_pf_cap_info},
	{SSS_TOOL_GET_SELF_TEST_RES, sss_tool_get_self_test_result},
	{SSS_TOOL_GET_CHIP_ID, sss_tool_get_all_chip_id_cmd},
	{SSS_TOOL_GET_PF_DEV_INFO, sss_tool_get_pf_dev_info},
	{SSS_TOOL_IS_DRV_IN_VM, sss_tool_is_driver_in_vm},
	{SSS_TOOL_CMD_FREE_MEM, sss_tool_free_all_card_mem},
	{SSS_TOOL_GET_CHIP_FAULT_STATS, (sss_tool_hw_cmd_func)sss_tool_get_chip_faults_stats},
	{SSS_TOOL_GET_SINGLE_CARD_INFO, (sss_tool_hw_cmd_func)sss_tool_get_single_card_info},
	{SSS_TOOL_GET_HW_STATS, (sss_tool_hw_cmd_func)sss_tool_get_hw_driver_stats},
	{SSS_TOOL_CLEAR_HW_STATS, sss_tool_clear_hw_driver_stats},
};
/*
 * Dispatch a tool message to the matching entry in g_hw_cmd_handle.
 * Returns -EINVAL for an unknown command, otherwise the handler's result.
 */
int sss_tool_msg_to_hw(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg,
		       void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
	enum sss_tool_driver_cmd_type cmd =
		(enum sss_tool_driver_cmd_type)(tool_msg->msg_formate);
	int idx;

	for (idx = 0; idx < ARRAY_LEN(g_hw_cmd_handle); idx++) {
		if (g_hw_cmd_handle[idx].cmd_type == cmd)
			return g_hw_cmd_handle[idx].func(hal_dev, buf_in, in_size,
							 buf_out, out_size);
	}

	tool_err("Fail to send msg to hw, cmd: %d out of range\n", cmd);
	return -EINVAL;
}

View File

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

#ifndef SSS_TOOL_SDK_H
#define SSS_TOOL_SDK_H

#include "sss_tool_comm.h"
#include "sss_tool_hw.h"
#include "sss_hw.h"

/* Release the reserved adm pages of card @id; always returns 0. */
long sss_tool_free_card_mem(int id);

/* Dispatch a tool message to the HW command table; see sss_tool_sdk.c. */
int sss_tool_msg_to_hw(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg,
		       void *buf_in, u32 in_size, void *buf_out, u32 *out_size);

#endif

View File

@ -0,0 +1,383 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt
#include "sss_hwdev.h"
#include "sss_hwif_adm.h"
#include "sss_tool_comm.h"
#include "sss_tool_sm.h"
#define SSS_TOOL_CHIP_ACK 1
#define SSS_TOOL_CHIP_NOACK 0

/* SM opcodes: plain read vs read-and-clear. */
#define SSS_TOOL_SM_CHIP_OP_READ 0x2
#define SSS_TOOL_SM_CHIP_OP_READ_CLEAR 0x6

#define SSS_TOOL_BIT_32 32

/* User-supplied read parameters: target node, counter id, instance. */
struct sss_tool_sm_in {
	int node;
	int id;
	int instance;
};

/* Read result; val2 is used only by the 64-bit-pair variants. */
struct sss_tool_sm_out {
	u64 val1;
	u64 val2;
};

/* First word of an SM request: bit-field view plus raw u32 for swapping. */
union sss_tool_sm_chip_request_head {
	struct {
		u32 pad:15;
		u32 ack:1;
		u32 op_id:5;
		u32 instance:6;
		u32 src:5;
	} bs;
	u32 value;
};

/* counter read request struct */
struct sss_tool_sm_chip_request {
	u32 extra;
	union sss_tool_sm_chip_request_head head;
	u32 ctr_id;
	u32 initial;
	u32 pad;
};

/* counter read response union */
/* Overlaid layouts for 16/32-bit, split-pair, 64-bit and 64-bit-pair reads. */
union sss_tool_chip_rd_response {
	struct {
		u32 value1:16;
		u32 pad0:16;
		u32 pad1[3];
	} bs_ss16_rsp;
	struct {
		u32 value1;
		u32 pad[3];
	} bs_ss32_rsp;
	struct {
		u32 value1:20;
		u32 pad0:12;
		u32 value2:12;
		u32 pad1:20;
		u32 pad2[2];
	} bs_sp_rsp;
	struct {
		u32 value1;
		u32 value2;
		u32 pad[2];
	} bs_bs64_rsp;
	struct {
		u32 val1_h;
		u32 val1_l;
		u32 val2_h;
		u32 val2_l;
	} bs_bp64_rsp;
};

/* Handler signature for one SM read variant. */
typedef int (*sss_tool_sm_handler_func)(void *hwdev, u32 id, u8 instance,
					u8 node, struct sss_tool_sm_out *out_buf);

/* Maps one SM command id to its handler. */
struct sss_tool_sm_handler {
	enum sss_tool_sm_cmd_type msg_name;
	sss_tool_sm_handler_func sm_func;
};
/* Build a byte-swapped SM counter-read request (header and counter id). */
static void sss_tool_sm_read_msg_create(struct sss_tool_sm_chip_request *request,
					u8 instance_id, u8 op_id,
					u8 ack, u32 ctr_id, u32 init_val)
{
	union sss_tool_sm_chip_request_head head;

	head.value = 0;
	head.bs.op_id = op_id;
	head.bs.ack = ack;
	head.bs.instance = instance_id;

	/* Header and counter id travel in wire (big-endian) order. */
	request->head.value = HTONL(head.value);
	request->initial = init_val;
	request->ctr_id = HTONL(ctr_id);
}
/* In-place 32-bit byte swap over @len words starting at @node. */
static void sss_tool_sm_node_htonl(u32 *node, u32 len)
{
	u32 idx;

	for (idx = 0; idx < len; idx++)
		node[idx] = HTONL(node[idx]);
}
/*
 * Issue a counter-read request over the ADM message channel and byte-swap
 * the response into host order. On success @rsp is already converted —
 * callers must not swap it again.
 */
static int sss_tool_sm_adm_msg_rd(void *hwdev, u32 id, u8 instance,
				  u8 node, union sss_tool_chip_rd_response *rsp, u8 opcode)
{
	struct sss_tool_sm_chip_request req = {0};
	int ret;

	if (!hwdev)
		return -EFAULT;

	if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) {
		tool_err("Fail to read sm data, device not support adm msg\n");
		return -EPERM;
	}

	/* Build the request in wire (big-endian) order. */
	sss_tool_sm_read_msg_create(&req, instance, opcode,
				    SSS_TOOL_CHIP_ACK, id, 0);

	ret = sss_adm_msg_read_ack(hwdev, node, (u8 *)&req,
				   (unsigned short)sizeof(req),
				   (void *)rsp,
				   (unsigned short)sizeof(*rsp));
	if (ret) {
		tool_err("Fail to read sm data from adm msg, err(%d)\n", ret);
		return ret;
	}

	/* Convert every 32-bit word of the response to host order. */
	sss_tool_sm_node_htonl((u32 *)rsp, sizeof(*rsp) / sizeof(u32));

	return 0;
}
/* Read a 16-bit SM counter; the output saturates to ~0 on failure. */
static int sss_tool_sm_msg_rd16(void *hwdev, u32 id, u8 instance,
				u8 node, struct sss_tool_sm_out *out_buf)
{
	u16 val1;
	union sss_tool_chip_rd_response rsp;
	int ret = 0;

	ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, &rsp, SSS_TOOL_SM_CHIP_OP_READ);
	if (ret) {
		/* The message previously said "32 bits" — copy/paste from rd32. */
		tool_err("Fail to read sm 16 bits\n");
		val1 = ~0;
		goto out;
	}

	val1 = rsp.bs_ss16_rsp.value1;

out:
	out_buf->val1 = val1;
	return ret;
}
/* Read a 32-bit SM counter; the output saturates to ~0 on failure. */
static int sss_tool_sm_msg_rd32(void *hwdev, u32 id, u8 instance,
				u8 node, struct sss_tool_sm_out *out_buf)
{
	union sss_tool_chip_rd_response rsp;
	int ret;

	ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, &rsp, SSS_TOOL_SM_CHIP_OP_READ);
	if (ret) {
		tool_err("Fail to read sm 32 bits\n");
		out_buf->val1 = (u32)~0;
		return ret;
	}

	out_buf->val1 = rsp.bs_ss32_rsp.value1;
	return 0;
}
/* Read-and-clear a 32-bit SM counter; output saturates to ~0 on failure. */
static int sss_tool_sm_msg_rd32_clear(void *hwdev, u32 id, u8 instance,
				      u8 node, struct sss_tool_sm_out *out_buf)
{
	union sss_tool_chip_rd_response rsp;
	int ret;

	ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node,
				     &rsp, SSS_TOOL_SM_CHIP_OP_READ_CLEAR);
	if (ret) {
		tool_err("Fail to read sm 32 bits\n");
		out_buf->val1 = (u32)~0;
		return ret;
	}

	out_buf->val1 = rsp.bs_ss32_rsp.value1;
	return 0;
}
/* Read a 64-bit SM counter pair; @id must be even. Saturates on failure. */
static int sss_tool_sm_msg_rd128(void *hwdev, u32 id, u8 instance,
				 u8 node, struct sss_tool_sm_out *out_buf)
{
	u64 val1 = 0;
	u64 val2 = 0;
	int ret = 0;
	union sss_tool_chip_rd_response rsp;

	if ((id & 0x1) != 0) {
		tool_err("Invalid id(%u), It is odd number\n", id);
		val1 = ~0;
		val2 = ~0;
		ret = -EINVAL;
		goto out;
	}

	ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node,
				     &rsp, SSS_TOOL_SM_CHIP_OP_READ);
	if (ret) {
		tool_err("Fail to read sm 128 bits\n");
		val1 = ~0;
		val2 = ~0;
		goto out;
	}

	/*
	 * NOTE(review): sss_tool_sm_adm_msg_rd() already byte-swaps the
	 * response; the extra sss_tool_sm_node_htonl() that used to sit
	 * here undid that swap and made rd128 disagree with rd128_clear
	 * and the rd32/rd64 variants. Dropped for consistency.
	 */
	val1 = ((u64)rsp.bs_bp64_rsp.val1_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val1_l;
	val2 = ((u64)rsp.bs_bp64_rsp.val2_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val2_l;

out:
	out_buf->val1 = val1;
	out_buf->val2 = val2;
	return ret;
}
/* Read-and-clear a 64-bit SM counter pair; @id must be even. */
static int sss_tool_sm_msg_rd128_clear(void *hwdev, u32 id, u8 instance,
				       u8 node, struct sss_tool_sm_out *out_buf)
{
	union sss_tool_chip_rd_response rsp;
	int ret;

	if (id & 0x1) {
		tool_err("Invalid id(%u), It is odd number\n", id);
		out_buf->val1 = ~0;
		out_buf->val2 = ~0;
		return -EINVAL;
	}

	ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node,
				     &rsp, SSS_TOOL_SM_CHIP_OP_READ_CLEAR);
	if (ret) {
		tool_err("Fail to read sm 128 bits\n");
		out_buf->val1 = ~0;
		out_buf->val2 = ~0;
		return ret;
	}

	out_buf->val1 = ((u64)rsp.bs_bp64_rsp.val1_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val1_l;
	out_buf->val2 = ((u64)rsp.bs_bp64_rsp.val2_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val2_l;
	return 0;
}
/* Read a 64-bit SM counter; the output saturates to ~0 on failure. */
static int sss_tool_sm_msg_rd64(void *hwdev, u32 id, u8 instance,
				u8 node, struct sss_tool_sm_out *out_buf)
{
	union sss_tool_chip_rd_response rsp;
	int ret;

	ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node,
				     &rsp, SSS_TOOL_SM_CHIP_OP_READ);
	if (ret) {
		tool_err("Fail to read sm 64 bits\n");
		out_buf->val1 = ~0;
		return ret;
	}

	out_buf->val1 = ((u64)rsp.bs_bs64_rsp.value1 << SSS_TOOL_BIT_32) | rsp.bs_bs64_rsp.value2;
	return 0;
}
/* Read-and-clear a 64-bit SM counter; output saturates to ~0 on failure. */
static int sss_tool_sm_msg_rd64_clear(void *hwdev, u32 id, u8 instance,
				      u8 node, struct sss_tool_sm_out *out_buf)
{
	union sss_tool_chip_rd_response rsp;
	int ret;

	ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node,
				     &rsp, SSS_TOOL_SM_CHIP_OP_READ_CLEAR);
	if (ret) {
		tool_err("Fail to read sm 64 bits\n");
		out_buf->val1 = ~0;
		return ret;
	}

	out_buf->val1 = ((u64)rsp.bs_bs64_rsp.value1 << SSS_TOOL_BIT_32) | rsp.bs_bs64_rsp.value2;
	return 0;
}
/* Dispatch table: SM command id -> read handler. */
const struct sss_tool_sm_handler g_sm_cmd_handle[] = {
	{SSS_TOOL_SM_CMD_RD16, sss_tool_sm_msg_rd16},
	{SSS_TOOL_SM_CMD_RD32, sss_tool_sm_msg_rd32},
	{SSS_TOOL_SM_CMD_RD32_CLEAR, sss_tool_sm_msg_rd32_clear},
	{SSS_TOOL_SM_CMD_RD64, sss_tool_sm_msg_rd64},
	{SSS_TOOL_SM_CMD_RD64_CLEAR, sss_tool_sm_msg_rd64_clear},
	{SSS_TOOL_SM_CMD_RD64_PAIR, sss_tool_sm_msg_rd128},
	{SSS_TOOL_SM_CMD_RD64_PAIR_CLEAR, sss_tool_sm_msg_rd128_clear}
};
/*
 * Dispatch an SM read request to the matching handler in g_sm_cmd_handle.
 * in/out buffers must be exactly sizeof(struct sss_tool_sm_in/out).
 */
int sss_tool_msg_to_sm(struct sss_hal_dev *hal_dev, struct sss_tool_msg *msg,
		       void *in_buf, u32 in_len, void *out_buf, u32 *out_len)
{
	struct sss_tool_sm_in *sm_in = in_buf;
	struct sss_tool_sm_out *sm_out = out_buf;
	u32 msg_formate = msg->msg_formate;
	const struct sss_tool_sm_handler *handler = NULL;
	int idx;
	int ret;

	if (!in_buf || !out_buf || !out_len) {
		tool_err("Invalid in_buf or out buf param\n");
		return -EINVAL;
	}

	if (in_len != sizeof(*sm_in) || *out_len != sizeof(*sm_out)) {
		tool_err("Invalid out buf size :%u, in buf size: %u\n",
			 *out_len, in_len);
		return -EINVAL;
	}

	for (idx = 0; idx < ARRAY_LEN(g_sm_cmd_handle); idx++) {
		if (g_sm_cmd_handle[idx].msg_name == msg_formate) {
			handler = &g_sm_cmd_handle[idx];
			break;
		}
	}

	if (!handler) {
		tool_err("Fail to execute msg %d,could not find callback\n", msg_formate);
		return -EINVAL;
	}

	ret = handler->sm_func(hal_dev->hwdev, (u32)sm_in->id,
			       (u8)sm_in->instance, (u8)sm_in->node, sm_out);
	if (ret != 0)
		tool_err("Fail to get sm information, id:%u, instance:%u, node:%u, msg:%d\n",
			 sm_in->id, sm_in->instance, sm_in->node, msg_formate);

	*out_len = sizeof(*sm_out);

	return ret;
}

View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_TOOL_SM_H
#define SSS_TOOL_SM_H

#include "sss_pci_global.h"
#include "sss_tool_comm.h"
#include "sss_tool_hw.h"

/* 32-bit byte swap (host <-> network order) fallback macro. */
#ifndef HTONL
#define HTONL(x) \
	((((x) & 0x000000ff) << 24) | \
	 (((x) & 0x0000ff00) << 8) | \
	 (((x) & 0x00ff0000) >> 8) | \
	 (((x) & 0xff000000) >> 24))
#endif

/* Dispatch a state-machine (SM) counter read request; see sss_tool_sm.c. */
int sss_tool_msg_to_sm(struct sss_hal_dev *hal_dev, struct sss_tool_msg *msg,
		       void *in_buf, u32 in_len, void *out_buf, u32 *out_len);

#endif

View File

@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HW_AEQ_H
#define SSS_HW_AEQ_H

/* Async event queue (AEQ) hardware event sources. */
enum sss_aeq_hw_event {
	SSS_HW_FROM_INT = 0,
	SSS_MBX_FROM_FUNC = 1,
	SSS_MSG_FROM_MGMT = 2,
	SSS_ADM_RSP = 3,
	SSS_ADM_MSG_STS = 4,
	SSS_MBX_SEND_RSLT = 5,
	SSS_AEQ_EVENT_MAX
};

/* AEQ software-defined events. */
enum sss_aeq_sw_event {
	SSS_STL_EVENT = 0,
	SSS_STF_EVENT = 1,
	SSS_AEQ_SW_EVENT_MAX
};

/* Microcode-reported event codes. */
enum sss_ucode_event_type {
	SSS_INTERN_ERR = 0x0,
	SSS_CHN_BUSY = 0x7,
	SSS_ERR_MAX = 0x8,
};

#endif

View File

@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HW_CEQ_H
#define SSS_HW_CEQ_H

/* Completion event queue (CEQ) event types. */
enum sss_ceq_event {
	SSS_NIC_CTRLQ = 0x3,
	SSS_NIC_SQ,
	SSS_NIC_RQ,
	SSS_CEQ_EVENT_MAX,
};

#endif

View File

@ -0,0 +1,121 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HW_COMMON_H
#define SSS_HW_COMMON_H

#include <linux/types.h>

#ifndef BIG_ENDIAN
#define BIG_ENDIAN 0x4321
#endif

#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN 0x1234
#endif

#ifdef BYTE_ORDER
#undef BYTE_ORDER
#endif
/* X86 */
/* NOTE(review): byte order is hard-coded little-endian here — confirm
 * before building for big-endian targets.
 */
#define BYTE_ORDER LITTLE_ENDIAN

/* Element count of a fixed-size array (do not use on pointers). */
#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof((arr)[0])))

#ifndef IFNAMSIZ
#define IFNAMSIZ 16
#endif

/* PCI function personalities. */
enum sss_func_type {
	SSS_FUNC_TYPE_PF,
	SSS_FUNC_TYPE_VF,
	SSS_FUNC_TYPE_PPF,
	SSS_FUNC_TYPE_UNKNOWN,
};

/* A DMA allocation tracked as both its raw and aligned address pair. */
struct sss_dma_addr_align {
	u32 real_size;

	void *origin_vaddr;
	dma_addr_t origin_paddr;

	void *align_vaddr;
	dma_addr_t align_paddr;
};

/* Generic polling outcome used by wait helpers. */
enum sss_process_ret {
	SSS_PROCESS_OK = 0,
	SSS_PROCESS_DOING = 1,
	SSS_PROCESS_ERR = 2,
};

/* Scatter-gather entry: 64-bit address split into two 32-bit halves. */
struct sss_sge {
	u32 high_addr;
	u32 low_addr;
	u32 len;
};

typedef enum sss_process_ret(*sss_wait_handler_t)(void *priv_data);
/* *
* sssnic_cpu_to_be32 - convert data to big endian 32 bit format
* @data: the data to convert
* @len: length of data to convert, must be Multiple of 4B
*/
static inline void sss_cpu_to_be32(void *data, int len)
{
int i, chunk_sz = sizeof(u32);
int data_len = len;
u32 *mem = data;
if (!data)
return;
data_len = data_len / chunk_sz;
for (i = 0; i < data_len; i++) {
*mem = cpu_to_be32(*mem);
mem++;
}
}
/* *
 * sss_be32_to_cpu - convert data from big endian 32 bit format to cpu order,
 * in place
 * @data: the data to convert (may be NULL, in which case nothing is done)
 * @len: length of data to convert, must be a multiple of 4B
 */
static inline void sss_be32_to_cpu(void *data, int len)
{
	u32 *word = data;
	int word_num;
	int id;

	if (!data)
		return;

	word_num = (int)(len / sizeof(u32));
	for (id = 0; id < word_num; id++, word++)
		*word = be32_to_cpu(*word);
}
/* *
 * sss_set_sge - fill a scatter gather entry with a dma area
 * @sge: scatter gather entry to fill
 * @addr: dma address, split into high/low 32-bit halves
 * @len: length of relevant data in the dma address
 */
static inline void sss_set_sge(struct sss_sge *sge, dma_addr_t addr, int len)
{
	sge->len = len;
	sge->low_addr = lower_32_bits(addr);
	sge->high_addr = upper_32_bits(addr);
}
#define sss_hw_be32(val) (val)
#define sss_hw_cpu32(val) (val)
#define sss_hw_cpu16(val) (val)
#endif

View File

@ -0,0 +1,67 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HW_CTRLQ_H
#define SSS_HW_CTRLQ_H
#include <linux/types.h>
#include <linux/atomic.h>
/* DMA-able message buffer exchanged with the control queue (ctrlq);
 * allocated/freed via sss_alloc_ctrlq_msg_buf()/sss_free_ctrlq_msg_buf().
 */
struct sss_ctrl_msg_buf {
void *buf;	/* kernel virtual address of the buffer */
dma_addr_t dma_addr;	/* dma address of the buffer */
u16 size;	/* buffer size in bytes */
/* Usage count, USERS DO NOT USE */
atomic_t ref_cnt;
};
/**
* @brief sss_alloc_ctrlq_msg_buf - alloc ctrlq msg buffer
* @param hwdev: device pointer to hwdev
* @retval non-zero: success
* @retval null: failure
**/
struct sss_ctrl_msg_buf *sss_alloc_ctrlq_msg_buf(void *hwdev);
/**
* @brief sss_free_ctrlq_msg_buf - free ctrlq msg buffer
* @param hwdev: device pointer to hwdev
* @param msg_buf: buffer to free
**/
void sss_free_ctrlq_msg_buf(void *hwdev, struct sss_ctrl_msg_buf *msg_buf);
/**
* @brief sss_ctrlq_direct_reply - ctrlq direct message response
* @param hwdev: device pointer to hwdev
* @param mod: mod type
* @param cmd: cmd
* @param in_buf: message buffer in
* @param out_param: message out
* @param timeout: timeout
* @param channel: channel id
* @retval zero: success
* @retval non-zero: failure
*/
int sss_ctrlq_direct_reply(void *hwdev, u8 mod, u8 cmd,
struct sss_ctrl_msg_buf *in_buf,
u64 *out_param, u32 timeout, u16 channel);
/**
* @brief sss_ctrlq_detail_reply - ctrlq detail message response
* @param hwdev: device pointer to hwdev
* @param mod: mod type
* @param cmd: cmd
* @param in_buf: message buffer in
* @param out_buf: message buffer out
* @param out_param: inline output data
* @param timeout: timeout
* @param channel: channel id
* @retval zero: success
* @retval non-zero: failure
*/
int sss_ctrlq_detail_reply(void *hwdev, u8 mod, u8 cmd,
struct sss_ctrl_msg_buf *in_buf, struct sss_ctrl_msg_buf *out_buf,
u64 *out_param, u32 timeout, u16 channel);
#endif

View File

@ -0,0 +1,160 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HW_EVENT_H
#define SSS_HW_EVENT_H
#include <linux/types.h>
#include "sss_hw_svc_cap.h"
/* Sources of reported faults; the leading entries deliberately mirror
 * enum sss_fault_type (see the "same as ..." notes below).
 */
enum sss_fault_source_type {
/* same as SSS_FAULT_TYPE_CHIP */
SSS_FAULT_SRC_HW_MGMT_CHIP = 0,
/* same as SSS_FAULT_TYPE_NPU */
SSS_FAULT_SRC_HW_MGMT_NPU,
/* same as SSS_FAULT_TYPE_MEM_RD_TIMEOUT */
SSS_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT,
/* same as SSS_FAULT_TYPE_MEM_WR_TIMEOUT */
SSS_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT,
/* same as SSS_FAULT_TYPE_REG_RD_TIMEOUT */
SSS_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT,
/* same as SSS_FAULT_TYPE_REG_WR_TIMEOUT */
SSS_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT,
SSS_FAULT_SRC_SW_MGMT_NPU,
SSS_FAULT_SRC_MGMT_WATCHDOG,
SSS_FAULT_SRC_MGMT_RESET = 8,
SSS_FAULT_SRC_HW_PHY_FAULT,
SSS_FAULT_SRC_TX_PAUSE_EXCP,
SSS_FAULT_SRC_PCIE_LINK_DOWN = 20,
SSS_FAULT_SRC_HOST_HEARTBEAT_LOST = 21,
SSS_FAULT_SRC_TX_TIMEOUT,
SSS_FAULT_SRC_TYPE_MAX,
};
/* Common (service-independent) event types passed to event handlers. */
enum sss_comm_event_type {
SSS_EVENT_PCIE_LINK_DOWN,	/* PCIe link went down */
SSS_EVENT_HEART_LOST,		/* device heartbeat lost */
SSS_EVENT_FAULT,		/* hardware fault report */
SSS_EVENT_SRIOV_STATE_CHANGE,	/* SR-IOV state changed */
SSS_EVENT_CARD_REMOVE,		/* card removed */
SSS_EVENT_MGMT_WATCHDOG,	/* management watchdog event */
SSS_EVENT_MAX
};
/* Service classes that can receive events. Per-service values are offset
 * from SSS_SERVICE_EVENT_BASE by the corresponding sss_service_type value
 * (see sss_hw_svc_cap.h and the SSS_SRV_EVENT_TYPE() macro below).
 */
enum sss_event_service_type {
SSS_EVENT_SRV_COMM,	/* common events, not tied to a service */
SSS_SERVICE_EVENT_BASE,
SSS_EVENT_SRV_NIC = SSS_SERVICE_EVENT_BASE + SSS_SERVICE_TYPE_NIC,
SSS_EVENT_SRV_MIGRATE = SSS_SERVICE_EVENT_BASE + SSS_SERVICE_TYPE_MIGRATE,
};
/* Fault severity levels, ordered from most to least severe. */
enum sss_fault_err_level {
SSS_FAULT_LEVEL_FATAL,
SSS_FAULT_LEVEL_SERIOUS_RESET,
SSS_FAULT_LEVEL_HOST,
SSS_FAULT_LEVEL_SERIOUS_FLR,	/* serious, handled by function level reset */
SSS_FAULT_LEVEL_GENERAL,
SSS_FAULT_LEVEL_SUGGESTION,
SSS_FAULT_LEVEL_MAX,
};
/* Hardware fault categories; selects the active member of
 * union sss_fault_hw_mgmt and is mirrored by the leading entries of
 * enum sss_fault_source_type.
 */
enum sss_fault_type {
SSS_FAULT_TYPE_CHIP,
SSS_FAULT_TYPE_NPU,
SSS_FAULT_TYPE_MEM_RD_TIMEOUT,
SSS_FAULT_TYPE_MEM_WR_TIMEOUT,
SSS_FAULT_TYPE_REG_RD_TIMEOUT,
SSS_FAULT_TYPE_REG_WR_TIMEOUT,
SSS_FAULT_TYPE_PHY_FAULT,
SSS_FAULT_TYPE_TSENSOR_FAULT,
SSS_FAULT_TYPE_MAX,
};
#define SSS_SRV_EVENT_TYPE(svc, type) ((((u32)(svc)) << 16) | (type))
#define SSS_MGMT_CMD_UNSUPPORTED 0xFF
/* Type-specific detail payload of a fault report. The active member is
 * selected by the sss_fault_event 'type' field (enum sss_fault_type); the
 * per-member comments below state which type each member is valid for.
 */
union sss_fault_hw_mgmt {
u32 val[4];
/* valid only type == SSS_FAULT_TYPE_CHIP */
struct {
u8 node_id;
/* enum sss_fault_err_level */
u8 err_level;
u16 err_type;
u32 err_csr_addr;
u32 err_csr_value;
/* func_id valid only if err_level == SSS_FAULT_LEVEL_SERIOUS_FLR */
u8 rsvd1;
u8 host_id;
u16 func_id;
} chip;
/* valid only if type == SSS_FAULT_TYPE_NPU */
struct {
u8 cause_id;
u8 core_id;
u8 c_id;
u8 rsvd3;
u32 epc;
u32 rsvd4;
u32 rsvd5;
} ucode;
/* valid only if type == SSS_FAULT_TYPE_MEM_RD_TIMEOUT ||
 * SSS_FAULT_TYPE_MEM_WR_TIMEOUT
 */
struct {
u32 err_csr_ctrl;
u32 err_csr_data;
u32 ctrl_tab;
u32 mem_id;
} mem_timeout;
/* valid only if type == SSS_FAULT_TYPE_REG_RD_TIMEOUT ||
 * SSS_FAULT_TYPE_REG_WR_TIMEOUT
 */
struct {
u32 err_csr;
u32 rsvd6;
u32 rsvd7;
u32 rsvd8;
} reg_timeout;
struct {
/* 0: read; 1: write */
u8 op_type;
u8 port_id;
u8 dev_ad;
u8 rsvd9;
u32 csr_addr;
u32 op_data;
u32 rsvd10;
} phy_fault;
};
/* Fault report layout, defined by chip */
struct sss_fault_event {
u8 type; /* enum sss_fault_type */
u8 fault_level; /* sdk write fault level for uld event */
u8 rsvd[2];
union sss_fault_hw_mgmt info;	/* detail payload, selected by 'type' */
};
/* Command wrapper carrying a sss_fault_event. */
struct sss_cmd_fault_event {
u8 status;	/* command completion status */
u8 ver;	/* command format version */
u8 rsvd[6];
struct sss_fault_event fault_event;
};
/* Event descriptor delivered to registered sss_event_handler_t callbacks. */
struct sss_event_info {
u16 service; /* enum sss_event_service_type */
u16 type; /* enum sss_comm_event_type */
u8 event_data[104];	/* opaque payload; layout depends on service/type */
};
typedef void (*sss_event_handler_t)(void *handle, struct sss_event_info *event);
#endif

View File

@ -0,0 +1,228 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */
#ifndef SSS_HW_EXPORT_H
#define SSS_HW_EXPORT_H
#include <linux/types.h>
#include "sss_hw_irq.h"
#include "sss_hw_svc_cap.h"
#include "sss_hw_event.h"
int sss_chip_set_msix_attr(void *hwdev,
struct sss_irq_cfg intr_cfg, u16 channel);
/* *
* @brief sss_chip_clear_msix_resend_bit - clear msix resend bit
* @param hwdev: device pointer to hwdev
* @param msix_id: msix id
* @param clear_en: 1-clear
*/
void sss_chip_clear_msix_resend_bit(void *hwdev, u16 msix_id, bool clear_en);
/**
* @brief sss_chip_reset_function - reset func
* @param hwdev: device pointer to hwdev
* @param func_id: global function index
* @param flag: reset flag
* @param channel: channel id
*/
int sss_chip_reset_function(void *hwdev, u16 func_id, u64 flag, u16 channel);
/**
* @brief sss_chip_set_root_ctx - set root context
* @param hwdev: device pointer to hwdev
* @param rq_depth: rq depth
* @param sq_depth: sq depth
* @param rx_size: rx buffer size
* @param channel: channel id
* @retval zero: success
* @retval non-zero: failure
**/
int sss_chip_set_root_ctx(void *hwdev,
u32 rq_depth, u32 sq_depth, int rx_size, u16 channel);
/**
* @brief sss_chip_clean_root_ctx - clean root context
* @param hwdev: device pointer to hwdev
* @param channel: channel id
* @retval zero: success
* @retval non-zero: failure
**/
int sss_chip_clean_root_ctx(void *hwdev, u16 channel);
/* *
* @brief sss_get_mgmt_version - get management cpu version
* @param hwdev: device pointer to hwdev
* @param buf: output management version
* @param channel: channel id
* @retval zero: success
* @retval non-zero: failure
*/
int sss_get_mgmt_version(void *hwdev, u8 *buf, u8 buf_size, u16 channel);
/**
* @brief sss_chip_set_func_used_state - set function service used state
* @param hwdev: device pointer to hwdev
* @param service_type: service type
* @param state: function used state
* @param channel: channel id
* @retval zero: success
* @retval non-zero: failure
*/
int sss_chip_set_func_used_state(void *hwdev,
u16 service_type, bool state, u16 channel);
bool sss_get_nic_capability(void *hwdev, struct sss_nic_service_cap *capability);
/* *
* @brief sss_support_nic - check whether the function supports nic
* @param hwdev: device pointer to hwdev
* @retval true: function supports nic
* @retval false: function does not support nic
*/
bool sss_support_nic(void *hwdev);
bool sss_support_ppa(void *hwdev, struct sss_ppa_service_cap *cap);
/* *
* @brief sss_get_max_sq_num - get max queue number
* @param hwdev: device pointer to hwdev
* @retval non-zero: max queue number
* @retval zero: failure
*/
u16 sss_get_max_sq_num(void *hwdev);
/* *
* @brief sss_get_phy_port_id - get physical port id
* @param hwdev: device pointer to hwdev
* @retval physical port id
*/
u8 sss_get_phy_port_id(void *hwdev); /* Obtain sss_service_cap.port_id */
/* *
* @brief sss_get_max_vf_num - get vf number
* @param hwdev: device pointer to hwdev
* @retval non-zero: vf number
* @retval zero: failure
*/
u16 sss_get_max_vf_num(void *hwdev); /* Obtain sss_service_cap.max_vf */
/* *
* @brief sss_get_cos_valid_bitmap - get cos valid bitmap
* @param hwdev: device pointer to hwdev
* @retval non-zero: valid cos bit map
* @retval zero: failure
*/
int sss_get_cos_valid_bitmap(void *hwdev, u8 *func_cos_bitmap, u8 *port_cos_bitmap);
/* *
* @brief sss_alloc_irq - alloc irq
* @param hwdev: device pointer to hwdev
* @param service_type: service type
* @param alloc_array: alloc irq info
* @param alloc_num: alloc number
* @retval zero: failure
* @retval non-zero: success
*/
u16 sss_alloc_irq(void *hwdev, enum sss_service_type service_type,
struct sss_irq_desc *alloc_array, u16 alloc_num);
/* *
* @brief sss_free_irq - free irq
* @param hwdev: device pointer to hwdev
* @param service_type: service type
* @param irq_id: irq id
*/
void sss_free_irq(void *hwdev, enum sss_service_type service_type, u32 irq_id);
/* *
* @brief sss_register_dev_event - register hardware event
* @param hwdev: device pointer to hwdev
* @param data: private data will be used by the callback
* @param callback: callback function
*/
void sss_register_dev_event(void *hwdev, void *data, sss_event_handler_t callback);
/* *
* @brief sss_unregister_dev_event - unregister hardware event
* @param dev: device pointer to hwdev
*/
void sss_unregister_dev_event(void *dev);
/* *
* @brief sss_get_dev_present_flag - get chip present flag
* @param hwdev: device pointer to hwdev
* @retval 1: chip is present
* @retval 0: chip is absent
*/
int sss_get_dev_present_flag(const void *hwdev);
/* *
* @brief sss_get_max_pf_num - get global max pf number
*/
u8 sss_get_max_pf_num(void *hwdev);
u16 sss_nic_intr_num(void *hwdev);
/* *
* @brief sss_get_chip_present_state - get card present state
* @param hwdev: device pointer to hwdev
* @param present_state: return card present state
* @retval zero: success
* @retval non-zero: failure
*/
int sss_get_chip_present_state(void *hwdev, bool *present_state);
/**
* @brief sss_fault_event_report - report fault event
* @param hwdev: device pointer to hwdev
* @param src: fault event source, reference to enum sss_fault_source_type
* @param level: fault level, reference to enum sss_fault_err_level
*/
void sss_fault_event_report(void *hwdev, u16 src, u16 level);
/**
* @brief sss_register_service_adapter - register service adapter
* @param hwdev: device pointer to hwdev
* @param service_type: service type
* @param service_adapter: service adapter
* @retval zero: success
* @retval non-zero: failure
**/
int sss_register_service_adapter(void *hwdev, enum sss_service_type service_type,
void *service_adapter);
/**
* @brief sss_unregister_service_adapter - unregister service adapter
* @param hwdev: device pointer to hwdev
* @param service_type: service type
**/
void sss_unregister_service_adapter(void *hwdev,
enum sss_service_type service_type);
/**
* @brief sss_get_service_adapter - get service adapter
* @param hwdev: device pointer to hwdev
* @param service_type: service type
* @retval non-zero: success
* @retval null: failure
**/
void *sss_get_service_adapter(void *hwdev, enum sss_service_type service_type);
/**
* @brief sss_do_event_callback - event callback to notify service driver
* @param hwdev: device pointer to hwdev
* @param event: event info to service driver
*/
void sss_do_event_callback(void *hwdev, struct sss_event_info *event);
/**
* @brief sss_update_link_stats - link event stats
* @param hwdev: device pointer to hwdev
* @param link_state: link status
*/
void sss_update_link_stats(void *hwdev, bool link_state);
#endif

Some files were not shown because too many files have changed in this diff Show More