ice: update thirdparty ice nic driver to 1.10.1.2

Conflicts:
	drivers/thirdparty/ice/ice_ethtool.c

Signed-off-by: Jinliang Zheng <alexjlzheng@tencent.com>
Reviewed-by: mengensun <mengensun@tencent.com>
Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
commit a2936f347e (parent 9660939fa0)
Author: Jinliang Zheng, 2024-04-28 16:57:20 +08:00; committed by Jianping Liu
174 files changed, 53563 insertions(+), 24595 deletions(-)

diff --git a/drivers/thirdparty/ice/Kconfig b/drivers/thirdparty/ice/Kconfig

@@ -63,6 +63,7 @@ config THIRDPARTY_ICE
 	depends on X86
 	default n
 	depends on PCI_MSI
+	select AUXILIARY_BUS
 	---help---
 	  This driver supports Intel(R) Ethernet Connection E800 Series of
 	  devices. For more information on how to identify your adapter, go
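
Note: the new "select AUXILIARY_BUS" is needed because this driver generation
attaches its RDMA interface through the kernel auxiliary bus rather than MFD
(see the Makefile and ice.h changes below). A minimal sketch of that
registration pattern, with illustrative names only (ice_adev_* is not the
driver's real symbol set):

#include <linux/auxiliary_bus.h>

static void ice_adev_release(struct device *dev)
{
	/* free the structure wrapping the auxiliary_device here */
}

static int ice_adev_plug(struct auxiliary_device *adev, struct device *parent,
			 const char *name)
{
	int err;

	adev->name = name;		/* matched against the aux driver's id_table */
	adev->id = 0;			/* must be unique per name */
	adev->dev.parent = parent;	/* required by auxiliary_device_init() */
	adev->dev.release = ice_adev_release;

	err = auxiliary_device_init(adev);
	if (err)
		return err;

	err = auxiliary_device_add(adev);
	if (err)
		auxiliary_device_uninit(adev);
	return err;
}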

diff --git a/drivers/thirdparty/ice/Makefile b/drivers/thirdparty/ice/Makefile

@@ -10,47 +10,73 @@ subdir-ccflags-y += -I$(src)
 obj-m += ice.o
 ice-y := ice_main.o \
 	ice_controlq.o \
 	ice_common.o \
 	ice_nvm.o \
 	ice_switch.o \
 	ice_sched.o \
 	ice_base.o \
 	ice_lib.o \
 	ice_txrx_lib.o \
 	ice_txrx.o \
 	ice_fltr.o \
+	ice_irq.o \
 	ice_pf_vsi_vlan_ops.o \
 	ice_vsi_vlan_ops.o \
 	ice_vsi_vlan_lib.o \
+	ice_gnss.o \
 	ice_tc_lib.o \
 	ice_fdir.o \
 	ice_ethtool_fdir.o \
 	ice_acl_main.o \
 	ice_acl.o \
 	ice_acl_ctrl.o \
 	ice_vlan_mode.o \
+	ice_ddp.o \
 	ice_flex_pipe.o \
 	ice_flow.o \
+	ice_parser.o \
+	ice_imem.o \
+	ice_pg_cam.o \
+	ice_metainit.o \
+	ice_bst_tcam.o \
+	ice_ptype_mk.o \
+	ice_mk_grp.o \
+	ice_proto_grp.o \
+	ice_flg_rd.o \
+	ice_xlt_kb.o \
+	ice_parser_rt.o \
 	ice_lag.o \
 	ice_fwlog.o \
-	ice_ethtool.o \
-	kcompat.o
+	ice_ieps.o \
+	ice_ethtool.o
 ice-$(CONFIG_NET_DEVLINK:m=y) += ice_devlink.o ice_fw_update.o
 ice-$(CONFIG_NET_DEVLINK:m=y) += ice_eswitch.o ice_repr.o
-ice-$(CONFIG_MFD_CORE:m=y) += ice_idc.o
+ice-y += ice_idc.o
 ice-$(CONFIG_DEBUG_FS) += ice_debugfs.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
-ice-$(CONFIG_PCI_IOV) += ice_dcf.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_fdir.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_vf_vsi_vlan_ops.o
+ice-$(CONFIG_PCI_IOV) += \
+	ice_dcf.o \
+	ice_sriov.o \
+	ice_vf_mbx.o \
+	ice_vf_vsi_vlan_ops.o \
+	ice_virtchnl_allowlist.o \
+	ice_vf_adq.o \
+	ice_virtchnl.o \
+	ice_virtchnl_fdir.o \
+	ice_virtchnl_fsub.o \
+	ice_vf_lib.o
+ifneq (${ENABLE_SIOV_SUPPORT},)
+ice-$(CONFIG_VFIO_MDEV:m=y) += ice_vdcm.o ice_siov.o
+endif
 ice-$(CONFIG_PTP_1588_CLOCK:m=y) += ice_ptp.o ice_ptp_hw.o
-ice-$(CONFIG_PTP_1588_CLOCK:m=y) += ice_cgu_ops.o ice_cgu_util.o
 ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
 ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
 ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
+ice-y += kcompat.o
 # Use kcompat pldmfw.c if kernel does not provide CONFIG_PLDMFW
 ifndef CONFIG_PLDMFW
 ice-y += kcompat_pldmfw.o

diff --git a/drivers/thirdparty/ice/ice.h b/drivers/thirdparty/ice/ice.h

@@ -17,9 +17,9 @@
 #include <linux/cpumask.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
 #include <linux/if_macvlan.h>
-#endif /* HAVE_NETDEV_SB_DEV */
+#endif /* HAVE_NDO_DFWD_OPS */
 #include <linux/dma-mapping.h>
 #include <linux/pci.h>
 #include <linux/workqueue.h>
@@ -30,6 +30,10 @@
 #include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/bitmap.h>
+#ifdef HAVE_INCLUDE_BITFIELD
+#include <linux/bitfield.h>
+#endif /* HAVE_INCLUDE_BITFIELD */
+#include <linux/hashtable.h>
 #include <linux/log2.h>
 #include <linux/ip.h>
 #include <linux/sctp.h>
@@ -38,6 +42,10 @@
 #include <linux/if_bridge.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
+#include <linux/sizes.h>
+#ifdef HAVE_LINKMODE
+#include <linux/linkmode.h>
+#endif /* HAVE_LINKMODE */
 #ifdef HAVE_XDP_SUPPORT
 #include <linux/bpf.h>
 #include <linux/filter.h>
@@ -49,14 +57,12 @@
 #if IS_ENABLED(CONFIG_NET_DEVLINK)
 #include <net/devlink.h>
 #endif /* CONFIG_NET_DEVLINK */
-#if IS_ENABLED(CONFIG_DCB)
-#include <scsi/iscsi_proto.h>
-#endif /* CONFIG_DCB */
 #ifdef HAVE_CONFIG_DIMLIB
 #include <linux/dim.h>
 #else
 #include "kcompat_dim.h"
 #endif
+#include "ice_ddp.h"
 #include "ice_devids.h"
 #include "ice_type.h"
 #include "ice_txrx.h"
@@ -65,17 +71,20 @@
 #include "ice_common.h"
 #include "ice_flow.h"
 #include "ice_sched.h"
-#include <linux/mfd/core.h>
+#ifdef USE_INTEL_AUX_BUS
+#include "linux/auxiliary_bus.h"
+#else
+#include <linux/auxiliary_bus.h>
+#endif /* USE_INTEL_AUX_BUS */
 #include <linux/idr.h>
 #include "ice_idc_int.h"
 #include "virtchnl.h"
-#include "ice_virtchnl_pf.h"
 #include "ice_sriov.h"
+#include "ice_vf_mbx.h"
 #include "ice_ptp.h"
-#include "ice_cgu.h"
-#include "ice_cgu_ops.h"
-#include "ice_cgu_util.h"
 #include "ice_fdir.h"
+#include "ice_vdcm.h"
+#include "ice_siov.h"
 #ifdef HAVE_AF_XDP_ZC_SUPPORT
 #include "ice_xsk.h"
 #endif /* HAVE_AF_XDP_ZC_SUPPORT */
@@ -97,6 +106,9 @@
 #include <net/geneve.h>
 #endif
 #endif /* HAVE_GENEVE_RX_OFFLOAD || HAVE_GENEVE_TYPE */
+#ifdef HAVE_GTP_SUPPORT
+#include <net/gtp.h>
+#endif /* HAVE_GTP_SUPPORT */
 #ifdef HAVE_UDP_ENC_RX_OFFLOAD
 #include <net/udp_tunnel.h>
 #endif
@@ -113,10 +125,12 @@
 #include "ice_repr.h"
 #include "ice_eswitch.h"
 #include "ice_vsi_vlan_ops.h"
+#include "ice_gnss.h"
 
 extern const char ice_drv_ver[];
 #define ICE_BAR0 0
+#define ICE_BAR_RDMA_DOORBELL_OFFSET 0x7f0000
 #define ICE_BAR3 3
 #ifdef CONFIG_DEBUG_FS
 #define ICE_MAX_CSR_SPACE (8 * 1024 * 1024 - 64 * 1024)
 #endif /* CONFIG_DEBUG_FS */
@@ -124,7 +138,11 @@ extern const char ice_drv_ver[];
 #define ICE_MIN_NUM_DESC 64
 #define ICE_MAX_NUM_DESC 8160
 #define ICE_DFLT_MIN_RX_DESC 512
+#ifdef CONFIG_ICE_USE_SKB
+#define ICE_DFLT_NUM_RX_DESC 512
+#else
 #define ICE_DFLT_NUM_RX_DESC 2048
+#endif /* CONFIG_ICE_USE_SKB */
 #define ICE_DFLT_NUM_TX_DESC 256
 #define ICE_DFLT_TXQ_VMDQ_VSI 1
@@ -133,9 +151,10 @@ extern const char ice_drv_ver[];
 #define ICE_MAX_NUM_VMDQ_VSI 16
 #define ICE_MAX_TXQ_VMDQ_VSI 4
 #define ICE_MAX_RXQ_VMDQ_VSI 4
-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
 #define ICE_MAX_MACVLANS 64
 #endif
+#define ICE_MAX_SCALABLE 100
 #define ICE_DFLT_TRAFFIC_CLASS BIT(0)
 #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
 #define ICE_AQ_LEN 192
@@ -192,8 +211,9 @@ extern const char ice_drv_ver[];
 #define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
 #define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
 #define ICE_ACL_ENTIRE_SLICE 1
 #define ICE_ACL_HALF_SLICE 2
+#define ICE_TCAM_DIVIDER_THRESHOLD 6
 
 /* Minimum BW limit is 500 Kbps for any scheduler node */
 #define ICE_MIN_BW_LIMIT 500
@@ -202,9 +222,29 @@ extern const char ice_drv_ver[];
  */
 #define ICE_BW_KBPS_DIVISOR 125
 
-#if defined(HAVE_TC_FLOWER_ENC) && defined(HAVE_TC_INDIR_BLOCK)
-#define ICE_GTP_TNL_WELLKNOWN_PORT 2152
-#endif /* HAVE_TC_FLOWER_ENC && HAVE_TC_INDIR_BLOCK */
+#if defined(HAVE_TCF_MIRRED_DEV) || defined(HAVE_TC_FLOW_RULE_INFRASTRUCTURE)
+#define ICE_GTPU_PORT 2152
+#endif /* HAVE_TCF_MIRRED_DEV || HAVE_TC_FLOW_RULE_INFRASTRUCTURE */
+#ifdef HAVE_GTP_SUPPORT
+#define ICE_GTPC_PORT 2123
+#endif /* HAVE_GTP_SUPPORT */
+
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+/* prio 5..7 can be used as advanced switch filter priority. Default recipes
+ * have prio 4 and below, hence prio value between 5..7 can be used as filter
+ * prio for advanced switch filter (advanced switch filter means it needs
+ * new recipe to be created to represent specified extraction sequence because
+ * default recipe extraction sequence does not represent custom extraction)
+ */
+#define ICE_SWITCH_FLTR_PRIO_QUEUE 7
+/* prio 6 is reserved for future use (e.g. switch filter with L3 fields +
+ * (Optional: IP TOS/TTL) + L4 fields + (optionally: TCP fields such as
+ * SYN/FIN/RST))
+ */
+#define ICE_SWITCH_FLTR_PRIO_RSVD 6
+#define ICE_SWITCH_FLTR_PRIO_VSI 5
+#define ICE_SWITCH_FLTR_PRIO_QGRP ICE_SWITCH_FLTR_PRIO_VSI
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
 
 /* Macro for each VSI in a PF */
 #define ice_for_each_vsi(pf, i) \
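
The ICE_SWITCH_FLTR_PRIO_* block above encodes the rule that default recipes
own priorities 4 and below, leaving 5..7 for custom recipes. Purely as an
illustration of how a caller would pick a band (ice_example_fltr_prio is
hypothetical, not a function from this commit):

static u16 ice_example_fltr_prio(bool forward_to_queue)
{
	/* 7 is reserved for queue-directed filters, 5 for VSI and
	 * queue-group destinations; 6 is held back for future filter types.
	 */
	return forward_to_queue ? ICE_SWITCH_FLTR_PRIO_QUEUE
				: ICE_SWITCH_FLTR_PRIO_VSI;
}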
@@ -254,25 +294,15 @@ struct ice_fwlog_user_input {
 enum ice_feature {
 	ICE_F_DSCP,
 	ICE_F_PTP_EXTTS,
+	ICE_F_CGU,
+	ICE_F_PHY_RCLK,
+	ICE_F_SMA_CTRL,
+	ICE_F_GNSS,
+	ICE_F_FIXED_TIMING_PINS,
+	ICE_F_LAG,
 	ICE_F_MAX
 };
 
-enum ice_channel_fltr_type {
-	ICE_CHNL_FLTR_TYPE_INVALID,
-	ICE_CHNL_FLTR_TYPE_SRC_PORT,
-	ICE_CHNL_FLTR_TYPE_DEST_PORT,
-	ICE_CHNL_FLTR_TYPE_SRC_DEST_PORT, /* for future use cases */
-	ICE_CHNL_FLTR_TYPE_TENANT_ID,
-	ICE_CHNL_FLTR_TYPE_SRC_IPV4,
-	ICE_CHNL_FLTR_TYPE_DEST_IPV4,
-	ICE_CHNL_FLTR_TYPE_SRC_DEST_IPV4,
-	ICE_CHNL_FLTR_TYPE_SRC_IPV6,
-	ICE_CHNL_FLTR_TYPE_DEST_IPV6,
-	ICE_CHNL_FLTR_TYPE_SRC_DEST_IPV6,
-	ICE_CHNL_FLTR_TYPE_LAST /* must be last */
-};
-
 struct ice_channel {
 	struct list_head list;
 	u8 type;
@@ -293,7 +323,9 @@ struct ice_channel {
 	atomic_t fd_queue;
 	/* packets services thru' inline-FD filter */
 	u64 fd_pkt_cnt;
-	enum ice_channel_fltr_type fltr_type;
+	u8 inline_fd:1;
+	u8 qps_per_poller;
+	u32 poller_timeout;
 	struct ice_vsi *ch_vsi;
 };
@@ -302,6 +334,8 @@ struct ice_channel {
 #define ICE_BW_MBIT_PS_DIVISOR	125000 /* rate / (1000000 / 8) Mbps */
 #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
 
+#define ICE_ADQ_MAX_QPS 256
+
 struct ice_txq_meta {
 	u32 q_teid;	/* Tx-scheduler element identifier */
 	u16 q_id;	/* Entry in VSI's txq_map bitmap */
@@ -344,8 +378,6 @@ struct ice_sw {
 	struct ice_pf *pf;
 	u16 sw_id;		/* switch ID for this switch */
 	u16 bridge_mode;	/* VEB/VEPA/Port Virtualizer */
-	struct ice_vsi *dflt_vsi;	/* default VSI for this switch */
-	u8 dflt_vsi_ena:1;	/* true if above dflt_vsi is enabled */
 };
 
 enum ice_pf_state {
@@ -354,9 +386,9 @@ enum ice_pf_state {
 	ICE_NEEDS_RESTART,
 	ICE_PREPARED_FOR_RESET,	/* set by driver when prepared */
 	ICE_RESET_OICR_RECV,	/* set by driver after rcv reset OICR */
-	ICE_PFR_REQ,		/* set by driver and peers */
-	ICE_CORER_REQ,		/* set by driver and peers */
-	ICE_GLOBR_REQ,		/* set by driver and peers */
+	ICE_PFR_REQ,		/* set by driver and aux drivers */
+	ICE_CORER_REQ,		/* set by driver and aux drivers */
+	ICE_GLOBR_REQ,		/* set by driver and aux drivers */
 	ICE_CORER_RECV,		/* set by OICR handler */
 	ICE_GLOBR_RECV,		/* set by OICR handler */
 	ICE_EMPR_RECV,		/* set by OICR handler */
@@ -382,8 +414,6 @@ enum ice_pf_state {
 	ICE_VF_DIS,
 	ICE_CFG_BUSY,
 	ICE_SERVICE_SCHED,
-	ICE_PTP_TX_TS_READY,
-	ICE_PTP_EXT_TS_READY,
 	ICE_SERVICE_DIS,
 	ICE_FD_FLUSH_REQ,
 	ICE_OICR_INTR_DIS,	/* Global OICR interrupt disabled */
@@ -393,6 +423,7 @@ enum ice_pf_state {
 	ICE_LINK_DEFAULT_OVERRIDE_PENDING,
 	ICE_PHY_INIT_COMPLETE,
 	ICE_FD_VF_FLUSH_CTX,	/* set at FD Rx IRQ or timeout */
+	ICE_AUX_ERR_PENDING,
 	ICE_STATE_NBITS		/* must be last */
 };
@@ -403,14 +434,12 @@ enum ice_vsi_state {
 	ICE_VSI_NETDEV_REGISTERED,
 	ICE_VSI_UMAC_FLTR_CHANGED,
 	ICE_VSI_MMAC_FLTR_CHANGED,
-	ICE_VSI_VLAN_FLTR_CHANGED,
 	ICE_VSI_PROMISC_CHANGED,
 	ICE_VSI_STATE_NBITS		/* must be last */
 };
 
 enum ice_chnl_feature {
 	ICE_CHNL_FEATURE_FD_ENA, /* for side-band flow-director */
-	ICE_CHNL_FEATURE_INLINE_FD_ENA, /* for inline flow-director */
 	/* using the SO_MARK socket option will trigger skb->mark to be set.
 	 * Driver should act on skb->mark of not (to align flow to HW queue
 	 * binding) is additionally controlled via ethtool private flag and
@@ -448,10 +477,10 @@ struct ice_vsi {
 	struct ice_port_info *port_info; /* back pointer to port_info */
 	struct ice_ring **rx_rings;	/* Rx ring array */
 	struct ice_ring **tx_rings;	/* Tx ring array */
-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
 	/* Initial VSI tx_rings array when L2 offload is off */
 	struct ice_ring **base_tx_rings;
-#endif /* HAVE_NETDEV_SB_DEV */
+#endif /* HAVE_NDO_DFWD_OPS */
 	struct ice_q_vector **q_vectors; /* q_vector array */
 
 	irqreturn_t (*irq_handler)(int irq, void *data);
@@ -472,7 +501,7 @@ struct ice_vsi {
 	u16 vsi_num;			/* HW (absolute) index of this VSI */
 	u16 idx;			/* software index in pf->vsi[] */
 
-	s16 vf_id;			/* VF ID for SR-IOV VSIs */
+	struct ice_vf *vf;		/* VF associated with this VSI */
 
 	u16 ethtype;			/* Ethernet protocol for pause frame */
 	u16 num_gfltr;
@@ -516,7 +545,6 @@ struct ice_vsi {
 	struct ice_vsi_vlan_ops outer_vlan_ops;
 	u16 num_vlan;
-
 	/* queue information */
 	u8 tx_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
 	u8 rx_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -539,6 +567,7 @@ struct ice_vsi {
 	u8 xdp_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
 #endif /* HAVE_XDP_SUPPORT */
 #ifdef HAVE_AF_XDP_ZC_SUPPORT
+	unsigned long *af_xdp_zc_qps;	/* tracks AF_XDP ZC enabled qps */
 #ifndef HAVE_AF_XDP_NETDEV_UMEM
 	struct xdp_umem **xsk_umems;
 	u16 num_xsk_umems_used;
@@ -549,10 +578,6 @@ struct ice_vsi {
 #ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
 	struct tc_mqprio_qopt_offload mqprio_qopt;/* queue parameters */
 #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
-	DECLARE_BITMAP(ptp_tx_idx, INDEX_PER_QUAD);
-	struct sk_buff *ptp_tx_skb[INDEX_PER_QUAD];
-	u32 tx_hwtstamp_skipped;
-	u8 ptp_tx:1;
 
 	/* Channel Specific Fields */
 	struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
@@ -599,7 +624,7 @@ struct ice_vsi {
 	u16 old_ena_tc;
 
 	struct ice_channel *ch;
-	struct net_device **target_netdevs;
+	u8 num_tc_devlink_params;
 
 	/* setup back reference, to which aggregator node this VSI
 	 * corresponds to
@@ -756,24 +781,30 @@ struct ice_q_vector {
 #ifdef ADQ_PERF_COUNTERS
 	struct ice_q_vector_ch_stats ch_stats;
 #endif /* ADQ_PERF_COUNTERS */
+	u64 last_wd_jiffy;
 } ____cacheline_internodealigned_in_smp;
 
 enum ice_pf_flags {
 	ICE_FLAG_FLTR_SYNC,
 	ICE_FLAG_VMDQ_ENA,
-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
 	ICE_FLAG_MACVLAN_ENA,
-#endif /* HAVE_NETDEV_SB_DEV */
+#endif /* HAVE_NDO_DFWD_OPS */
 	ICE_FLAG_IWARP_ENA,
 	ICE_FLAG_RSS_ENA,
 	ICE_FLAG_SRIOV_ENA,
 	ICE_FLAG_SRIOV_CAPABLE,
+	ICE_FLAG_SIOV_ENA,
+	ICE_FLAG_SIOV_CAPABLE,
 	ICE_FLAG_DCB_CAPABLE,
 	ICE_FLAG_DCB_ENA,
 	ICE_FLAG_FD_ENA,
-	ICE_FLAG_PTP_ENA,	/* NVM PTP support */
+	ICE_FLAG_PTP_SUPPORTED,	/* NVM PTP support */
 	ICE_FLAG_PTP,		/* PTP successfully initialized */
-	ICE_FLAG_PEER_ENA,
+	ICE_FLAG_AUX_ENA,
+	ICE_FLAG_PLUG_AUX_DEV,
+	ICE_FLAG_UNPLUG_AUX_DEV,
+	ICE_FLAG_MTU_CHANGED,
 	ICE_FLAG_ADV_FEATURES,
 #ifdef NETIF_F_HW_TC
 	ICE_FLAG_TC_MQPRIO,	/* support for Multi queue TC */
@@ -787,23 +818,28 @@ enum ice_pf_flags {
 	ICE_FLAG_BASE_R_FEC,
 #endif /* !ETHTOOL_GFECPARAM */
 	ICE_FLAG_FW_LLDP_AGENT,
-	ICE_FLAG_CHNL_INLINE_FD_ENA,
 	ICE_FLAG_CHNL_INLINE_FD_MARK_ENA,
 	ICE_FLAG_CHNL_PKT_INSPECT_OPT_ENA,
 	ICE_FLAG_CHNL_PKT_CLEAN_BP_STOP_ENA,
 	ICE_FLAG_CHNL_PKT_CLEAN_BP_STOP_CFG,
 	ICE_FLAG_MOD_POWER_UNSUPPORTED,
+	ICE_FLAG_PHY_FW_LOAD_FAILED,
 	ICE_FLAG_ETHTOOL_CTXT,	/* set when ethtool holds RTNL lock */
 	ICE_FLAG_LEGACY_RX,
 	ICE_FLAG_VF_TRUE_PROMISC_ENA,
 	ICE_FLAG_MDD_AUTO_RESET_VF,
-	ICE_FLAG_VF_VLAN_PRUNE_DIS,
+	ICE_FLAG_VF_VLAN_PRUNING,
 	ICE_FLAG_LINK_LENIENT_MODE_ENA,
 	ICE_FLAG_ESWITCH_CAPABLE,
+	ICE_FLAG_DPLL_FAST_LOCK,
+	ICE_FLAG_DPLL_MONITOR,
+	ICE_FLAG_EXTTS_FILTER,
+	ICE_FLAG_GNSS,		/* GNSS successfully initialized */
+	ICE_FLAG_ALLOW_FEC_DIS_AUTO,
 	ICE_PF_FLAGS_NBITS	/* must be last */
 };
 
-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
 struct ice_macvlan {
 	struct list_head list;
 	int id;
@@ -812,7 +848,7 @@ struct ice_macvlan {
 	struct ice_vsi *vsi;
 	u8 mac[ETH_ALEN];
 };
-#endif /* HAVE_NETDEV_SB_DEV */
+#endif /* HAVE_NDO_DFWD_OPS */
 
 struct ice_switchdev_info {
 	struct ice_vsi *control_vsi;
@@ -845,27 +881,45 @@ struct ice_agg_node {
 	u8 valid;
 };
 
-enum ice_flash_update_preservation {
-	/* Preserve all settings and fields */
-	ICE_FLASH_UPDATE_PRESERVE_ALL = 0,
-	/* Preserve limited fields, such as VPD, PCI serial ID, MACs, etc */
-	ICE_FLASH_UPDATE_PRESERVE_LIMITED,
-	/* Return all fields to factory settings */
-	ICE_FLASH_UPDATE_PRESERVE_FACTORY_SETTINGS,
-	/* Do not perform any preservation */
-	ICE_FLASH_UPDATE_PRESERVE_NONE,
-};
+#ifdef HAVE_DEVLINK_HEALTH
+enum ice_mdd_src {
+	ICE_MDD_SRC_NONE = 0,
+	ICE_MDD_SRC_TX_PQM,
+	ICE_MDD_SRC_TX_TCLAN,
+	ICE_MDD_SRC_TX_TDPU,
+	ICE_MDD_SRC_RX
+};
+
+struct ice_mdd_event {
+	struct list_head list;
+	enum ice_mdd_src src;
+	u8 pf_num;
+	u16 vf_num;
+	u8 event;
+	u16 queue;
+};
+
+struct ice_mdd_reporter {
+	struct devlink_health_reporter *reporter;
+	u16 count;
+	struct list_head event_list;
+};
+#endif /* HAVE_DEVLINK_HEALTH */
 
 struct ice_pf {
 	struct pci_dev *pdev;
 #if IS_ENABLED(CONFIG_NET_DEVLINK)
 #ifdef HAVE_DEVLINK_REGIONS
 	struct devlink_region *nvm_region;
+	struct devlink_region *sram_region;
 	struct devlink_region *devcaps_region;
 #endif /* HAVE_DEVLINK_REGIONS */
 	/* devlink port data */
 	struct devlink_port devlink_port;
+#ifdef HAVE_DEVLINK_HEALTH
+	struct ice_mdd_reporter mdd_reporter;
+#endif /* HAVE_DEVLINK_HEALTH */
 #endif /* CONFIG_NET_DEVLINK */
 
 	/* OS reserved IRQ details */
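
The ice_mdd_reporter/ice_mdd_event structures added above are the state behind
a devlink health reporter for Malicious Driver Detection events. A sketch of
the reporting side under the same assumptions (ice_example_report_mdd is
hypothetical; locking of event_list is elided):

#ifdef HAVE_DEVLINK_HEALTH
static void ice_example_report_mdd(struct ice_pf *pf, struct ice_mdd_event *evt)
{
	struct ice_mdd_reporter *rep = &pf->mdd_reporter;

	rep->count++;
	/* queue the event so the reporter's dump callback can replay it */
	list_add_tail(&evt->list, &rep->event_list);
	devlink_health_report(rep->reporter, "MDD detected", evt);
}
#endif /* HAVE_DEVLINK_HEALTH */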
@@ -885,15 +939,7 @@ struct ice_pf {
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *ice_debugfs_pf;
 #endif /* CONFIG_DEBUG_FS */
-	/* Virtchnl/SR-IOV config info */
-	struct ice_vf *vf;
-	u16 num_alloc_vfs;	/* actual number of VFs allocated */
-	u16 num_vfs_supported;	/* num VFs supported for this PF */
-	u16 num_qps_per_vf;
-	u16 num_msix_per_vf;
-	/* used to ratelimit the MDD event logging */
-	unsigned long last_printed_mdd_jiffies;
-	DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+	struct ice_vfs vfs;
 	DECLARE_BITMAP(features, ICE_F_MAX);
 	DECLARE_BITMAP(state, ICE_STATE_NBITS);
 	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
@@ -906,28 +952,33 @@ struct ice_pf {
 	struct mutex avail_q_mutex;	/* protects access to avail_[rx|tx]qs */
 	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
 	struct mutex tc_mutex;		/* lock to protect TC changes */
+	struct mutex adev_mutex;	/* lock to protect aux device access */
+	struct mutex lag_mutex;		/* lock protects the lag struct */
 	u32 msg_enable;
 	struct ice_ptp ptp;
-	struct ice_cgu_info cgu_info;
+	struct tty_driver *ice_gnss_tty_driver;
+	struct tty_port *gnss_tty_port;
+	struct gnss_serial *gnss_serial;
 	u16 num_rdma_msix;	/* Total MSIX vectors for RDMA driver */
 	u16 rdma_base_vector;
-	struct ice_peer_obj *rdma_peer;
-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
 	/* MACVLAN specific variables */
 	DECLARE_BITMAP(avail_macvlan, ICE_MAX_MACVLANS);
 	struct list_head macvlan_list;
 	u16 num_macvlan;
 	u16 max_num_macvlan;
-#endif /* HAVE_NETDEV_SB_DEV */
+#endif /* HAVE_NDO_DFWD_OPS */
 	/* spinlock to protect the AdminQ wait list */
 	spinlock_t aq_wait_lock;
 	struct hlist_head aq_wait_list;
 	wait_queue_head_t aq_wait_queue;
+	bool fw_emp_reset_disabled;
 
 	wait_queue_head_t reset_wait_queue;
 
 	u32 hw_csum_rx_error;
+	u32 oicr_err_reg;
 	u16 oicr_idx;		/* Other interrupt cause MSIX vector index */
 	u16 num_avail_sw_msix;	/* remaining MSIX SW vectors left unclaimed */
 	u16 max_pf_txqs;	/* Total Tx queues PF wide */
@@ -975,8 +1026,8 @@ struct ice_pf {
 	unsigned long tx_timeout_last_recovery;
 	u32 tx_timeout_recovery_level;
 	char int_name[ICE_INT_NAME_STR_LEN];
-	struct ice_peer_obj_int **peers;
-	int peer_idx;
+	struct iidc_core_dev_info **cdev_infos;
+	int aux_idx;
 	u32 sw_int_count;
 #ifdef HAVE_TC_SETUP_CLSFLOWER
 	/* count of tc_flower filters specific to channel (aka where filter
@@ -986,6 +1037,9 @@ struct ice_pf {
 	struct hlist_head tc_flower_fltr_list;
 #endif /* HAVE_TC_SETUP_CLSFLOWER */
+	u16 max_qps;
+	u16 max_adq_qps;
+
 	struct ice_dcf dcf;
 	__le64 nvm_phy_type_lo; /* NVM PHY type low */
 	__le64 nvm_phy_type_hi; /* NVM PHY type high */
@@ -1000,13 +1054,19 @@ struct ice_pf {
 	 */
 	spinlock_t tnl_lock;
 	struct list_head tnl_list;
+#ifdef HAVE_UDP_TUNNEL_NIC_INFO
+#ifdef HAVE_UDP_TUNNEL_NIC_SHARED
+	struct udp_tunnel_nic_shared udp_tunnel_shared;
+#endif /* HAVE_UDP_TUNNEL_NIC_SHARED */
+	struct udp_tunnel_nic_info udp_tunnel_nic;
+#endif /* HAVE_UDP_TUNNEL_NIC_INFO */
 	struct ice_switchdev_info switchdev;
 
 #define ICE_INVALID_AGG_NODE_ID 0
 #define ICE_PF_AGG_NODE_ID_START 1
 #define ICE_MAX_PF_AGG_NODES 32
 	struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES];
-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
 #define ICE_MACVLAN_AGG_NODE_ID_START (ICE_PF_AGG_NODE_ID_START + \
 				       ICE_MAX_PF_AGG_NODES)
 #define ICE_MAX_MACVLAN_AGG_NODES 32
@@ -1015,8 +1075,20 @@ struct ice_pf {
 #define ICE_VF_AGG_NODE_ID_START 65
 #define ICE_MAX_VF_AGG_NODES 32
 	struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
+	enum ice_cgu_state synce_dpll_state;
+	u8 synce_ref_pin;
+	enum ice_cgu_state ptp_dpll_state;
+	u8 ptp_ref_pin;
+	s64 ptp_dpll_phase_offset;
+	u32 phc_recalc;
+	u8 n_quanta_prof_used;
 };
 
+extern struct workqueue_struct *ice_wq;
+extern struct workqueue_struct *ice_lag_wq;
+
 struct ice_netdev_priv {
 	struct ice_vsi *vsi;
 #ifdef HAVE_TC_INDIR_BLOCK
@@ -1038,9 +1110,6 @@ struct ice_netdev_priv {
 	struct ice_repr *repr;
 };
 
-extern struct ida ice_peer_index_ida;
-
 /**
  * ice_vector_ch_enabled
  * @qv: pointer to q_vector, can be NULL
@@ -1052,6 +1121,19 @@ static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
 	return !!qv->ch; /* Enable it to run with TC */
 }
 
+/**
+ * ice_vector_ind_poller
+ * @qv: pointer to q_vector
+ *
+ * This function returns true if vector is channel enabled and
+ * independent pollers are enabled on the associated channel.
+ */
+static inline bool ice_vector_ind_poller(struct ice_q_vector *qv)
+{
+	return (ice_vector_ch_enabled(qv) && qv->ch->qps_per_poller &&
+		qv->ch->poller_timeout);
+}
+
 /**
  * ice_vector_busypoll_intr
  * @qv: pointer to q_vector
@@ -1065,45 +1147,6 @@ static inline bool ice_vector_busypoll_intr(struct ice_q_vector *qv)
 	       !(qv->state_flags & ICE_CHNL_IN_BP);
 }
 
-/**
- * ice_vector_ever_in_busypoll
- * @qv: pointer to q_vector
- *
- * This function returns true if vectors current OR previous state
- * is BUSY_POLL
- */
-static inline bool ice_vector_ever_in_busypoll(struct ice_q_vector *qv)
-{
-	return (qv->state_flags & ICE_CHNL_PREV_IN_BP) ||
-	       (qv->state_flags & ICE_CHNL_IN_BP);
-}
-
-/**
- * ice_vector_state_curr_prev_intr
- * @qv: pointer to q_vector
- *
- * This function returns true if vectors current AND previous state
- * is INTERRUPT
- */
-static inline bool ice_vector_state_curr_prev_intr(struct ice_q_vector *qv)
-{
-	return !(qv->state_flags & ICE_CHNL_PREV_IN_BP) &&
-	       !(qv->state_flags & ICE_CHNL_IN_BP);
-}
-
-/**
- * ice_vector_intr_busypoll
- * @qv: pointer to q_vector
- *
- * This function returns true if vector is transitioning from INTERRUPT
- * to BUSY_POLL based on current and previous state of vector
- */
-static inline bool ice_vector_intr_busypoll(struct ice_q_vector *qv)
-{
-	return !(qv->state_flags & ICE_CHNL_PREV_IN_BP) &&
-	       (qv->state_flags & ICE_CHNL_IN_BP);
-}
-
 /**
  * ice_adq_trigger_sw_intr
  * @hw: ptr to HW
@@ -1151,33 +1194,6 @@ ice_sw_intr_cntr(struct ice_q_vector *q_vector, bool napi_codepath)
 }
 #endif /* ADQ_PERF_COUNTERS */
 
-/**
- * ice_force_wb - trigger force write-back by setting WB_ON_ITR bit
- * @hw: ptr to HW
- * @q_vector: pointer to q_vector
- *
- * This function is used to force write-backs by setting WB_ON_ITR bit
- * in DYN_CTLN register. WB_ON_ITR and INTENA are mutually exclusive bits.
- * Setting WB_ON_ITR bits means Tx and Rx descriptors are written back based
- * on ITR expiration irrespective of INTENA setting
- */
-static inline void
-ice_force_wb(struct ice_hw *hw, struct ice_q_vector *q_vector)
-{
-	if (q_vector->num_ring_rx || q_vector->num_ring_tx) {
-#ifdef ADQ_PERF_COUNTERS
-		q_vector->ch_stats.num_wb_on_itr_set++;
-#endif /* ADQ_PERF_COUNTERS */
-		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
-		     ICE_GLINT_DYN_CTL_WB_ON_ITR(0, ICE_RX_ITR));
-	}
-
-	/* needed to avoid triggering WB_ON_ITR again which typically
-	 * happens from ice_set_wb_on_itr function
-	 */
-	q_vector->wb_on_itr = true;
-}
-
 /**
  * ice_irq_dynamic_ena - Enable default interrupt generation settings
  * @hw: pointer to HW struct
@@ -1215,12 +1231,30 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
 	return np->vsi->back;
 }
 
-#ifdef HAVE_XDP_SUPPORT
-static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
-{
-	return !!vsi->xdp_prog;
-}
+/**
+ * ice_kobj_to_pf - Retrieve the PF struct associated with a kobj
+ * @kobj: pointer to the kobject
+ *
+ * Returns a pointer to PF or NULL if there is no association.
+ */
+static inline struct ice_pf *ice_kobj_to_pf(struct kobject *kobj)
+{
+	if (!kobj || !kobj->parent)
+		return NULL;
+
+	return pci_get_drvdata(to_pci_dev(kobj_to_dev(kobj->parent)));
+}
+
+static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
+{
+#ifdef HAVE_XDP_SUPPORT
+	return !!vsi->xdp_prog;
+#else
+	return false;
+#endif
+}
+
+#ifdef HAVE_XDP_SUPPORT
 static inline void ice_set_ring_xdp(struct ice_ring *ring)
 {
 	ring->flags |= ICE_TX_FLAGS_RING_XDP;
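
ice_kobj_to_pf() above assumes the kobject's parent is the device kobject of
the PCI function, so pci_get_drvdata() recovers the PF. A hypothetical sysfs
attribute shows the intended call pattern (example_show is not from this
commit):

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	struct ice_pf *pf = ice_kobj_to_pf(kobj);

	if (!pf)
		return -ENODEV;
	return scnprintf(buf, PAGE_SIZE, "%u\n", pf->max_pf_txqs);
}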
@@ -1229,37 +1263,38 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
 #endif /* HAVE_XDP_SUPPORT */
 
 #ifdef HAVE_AF_XDP_ZC_SUPPORT
 /**
- * ice_xsk_umem - get XDP UMEM bound to a ring
- * @ring: ring to use
+ * ice_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: Rx ring to use
  *
- * Returns a pointer to xdp_umem structure if there is an UMEM present,
+ * Returns a pointer to xdp_umem structure if there is a buffer pool present,
  * NULL otherwise.
  */
 #ifdef HAVE_NETDEV_BPF_XSK_POOL
-static inline struct xsk_buff_pool *ice_xsk_umem(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
 #else
-static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
+static inline struct xdp_umem *ice_xsk_pool(struct ice_ring *ring)
 #endif
 {
+	struct ice_vsi *vsi = ring->vsi;
 #ifndef HAVE_AF_XDP_NETDEV_UMEM
-	struct xdp_umem **umems = ring->vsi->xsk_umems;
+	struct xdp_umem **umems = vsi->xsk_umems;
 #endif /* !HAVE_AF_XDP_NETDEV_UMEM */
 	u16 qid = ring->q_index;
 
 	if (ice_ring_is_xdp(ring))
-		qid -= ring->vsi->num_xdp_txq;
+		qid -= vsi->num_xdp_txq;
 
 #ifndef HAVE_AF_XDP_NETDEV_UMEM
-	if (qid >= ring->vsi->num_xsk_umems || !umems || !umems[qid] ||
-	    !ice_is_xdp_ena_vsi(ring->vsi))
+	if (qid >= vsi->num_xsk_umems || !umems || !umems[qid] ||
+	    !ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
 		return NULL;
 
 	return umems[qid];
 #else
-	if (!ice_is_xdp_ena_vsi(ring->vsi))
+	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
 		return NULL;
 
-	return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
+	return xsk_get_pool_from_qid(vsi->netdev, qid);
 #endif /* !HAVE_AF_XDP_NETDEV_UMEM */
 }
 #endif /* HAVE_AF_XDP_ZC_SUPPORT */
@@ -1272,10 +1307,7 @@ static inline struct xdp_umem *ice_xsk_pool(struct ice_ring *ring)
  */
 static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
 {
-	if (pf->vsi)
-		return pf->vsi[0];
-
-	return NULL;
+	return pf->vsi[0];
 }
 
 /**
@@ -1325,6 +1357,21 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
 	return pf->vsi[pf->ctrl_vsi_idx];
 }
 
+/**
+ * ice_find_vsi - Find the VSI from VSI ID
+ * @pf: The PF pointer to search in
+ * @vsi_num: The VSI ID to search for
+ */
+static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
+{
+	int i;
+
+	ice_for_each_vsi(pf, i)
+		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
+			return pf->vsi[i];
+	return NULL;
+}
+
 /**
  * ice_find_first_vsi_by_type - Find and return first VSI of a given type
  * @pf: PF to search for VSI
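
ice_find_vsi() gives an O(n) lookup from the absolute VSI number reported by
hardware back to the software VSI. Illustrative caller only (the ICE_VSI_DOWN
state test is an assumption about bits not shown in this hunk):

static bool ice_example_vsi_is_up(struct ice_pf *pf, u16 vsi_num)
{
	struct ice_vsi *vsi = ice_find_vsi(pf, vsi_num);

	return vsi && !test_bit(ICE_VSI_DOWN, vsi->state);
}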
@@ -1387,19 +1434,6 @@ static inline bool ice_vsi_fd_ena(struct ice_vsi *vsi)
 	return !!test_bit(ICE_CHNL_FEATURE_FD_ENA, vsi->features);
 }
 
-/**
- * ice_vsi_inline_fd_ena
- * @vsi: pointer to VSI
- *
- * This function returns true if VSI is enabled for usage of flow-director
- * otherwise returns false. This is controlled thru' ethtool priv-flag
- * 'channel-inline-flow-director'
- */
-static inline bool ice_vsi_inline_fd_ena(struct ice_vsi *vsi)
-{
-	return !!test_bit(ICE_CHNL_FEATURE_INLINE_FD_ENA, vsi->features);
-}
-
 static inline bool ice_vsi_inline_fd_mark_ena(struct ice_vsi *vsi)
 {
 	return !!test_bit(ICE_CHNL_FEATURE_INLINE_FD_MARK_ENA, vsi->features);
@@ -1529,12 +1563,12 @@ static inline bool ice_active_vmdqs(struct ice_pf *pf)
 	return !!ice_find_first_vsi_by_type(pf, ICE_VSI_VMDQ2);
 }
 
-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
 static inline bool ice_is_offloaded_macvlan_ena(struct ice_pf *pf)
 {
 	return test_bit(ICE_FLAG_MACVLAN_ENA, pf->flags);
 }
-#endif /* HAVE_NETDEV_SB_DEV */
+#endif /* HAVE_NDO_DFWD_OPS */
 
 #ifdef CONFIG_DEBUG_FS
 void ice_debugfs_pf_init(struct ice_pf *pf);
@@ -1563,12 +1597,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
 void ice_update_pf_stats(struct ice_pf *pf);
 void ice_update_vsi_stats(struct ice_vsi *vsi);
 int ice_up(struct ice_vsi *vsi);
+void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes);
 int ice_down(struct ice_vsi *vsi);
+int ice_down_up(struct ice_vsi *vsi);
 int ice_vsi_cfg(struct ice_vsi *vsi);
 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
 int ice_vsi_cfg_netdev_tc0(struct ice_vsi *vsi);
-#endif /* HAVE_NETDEV_SB_DEV */
+#endif /* HAVE_NDO_DFWD_OPS */
 #ifdef HAVE_XDP_SUPPORT
 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
 int ice_destroy_xdp_rings(struct ice_vsi *vsi);
@@ -1590,33 +1626,71 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
-#if IS_ENABLED(CONFIG_MFD_CORE)
-int ice_init_peer_devices(struct ice_pf *pf);
+int ice_plug_aux_dev(struct iidc_core_dev_info *cdev_info, const char *name);
+void ice_unplug_aux_dev(struct iidc_core_dev_info *cdev_info);
+int ice_plug_aux_devs(struct ice_pf *pf);
+void ice_unplug_aux_devs(struct ice_pf *pf);
+int ice_init_aux_devices(struct ice_pf *pf);
 int
-ice_for_each_peer(struct ice_pf *pf, void *data,
-		  int (*fn)(struct ice_peer_obj_int *, void *));
+ice_for_each_aux(struct ice_pf *pf, void *data,
		 int (*fn)(struct iidc_core_dev_info *, void *));
 #ifdef CONFIG_PM
-void ice_peer_refresh_msix(struct ice_pf *pf);
+void ice_cdev_info_refresh_msix(struct ice_pf *pf);
 #endif /* CONFIG_PM */
-#else /* !CONFIG_MFD_CORE */
-static inline int ice_init_peer_devices(struct ice_pf *pf) { return 0; }
-
-static inline int
-ice_for_each_peer(struct ice_pf *pf, void *data,
-		  int (*fn)(struct ice_peer_obj_int *, void *))
-{
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static inline void ice_peer_refresh_msix(struct ice_pf *pf) { }
-#endif /* CONFIG_PM */
-#endif /* !CONFIG_MFD_CORE */
-const char *ice_stat_str(enum ice_status stat_err);
+#ifdef HAVE_NETDEV_UPPER_INFO
+/**
+ * ice_set_sriov_cap - enable SRIOV in PF flags
+ * @pf: PF struct
+ */
+static inline void ice_set_sriov_cap(struct ice_pf *pf)
+{
+	if (pf->hw.func_caps.common_cap.sr_iov_1_1)
+		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+}
+
+/**
+ * ice_clear_sriov_cap - disable SRIOV in PF flags
+ * @pf: PF struct
+ */
+static inline void ice_clear_sriov_cap(struct ice_pf *pf)
+{
+	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+}
+
+/**
+ * ice_set_rdma_cap - enable RDMA in PF flags
+ * @pf: PF struct
+ */
+static inline void ice_set_rdma_cap(struct ice_pf *pf)
+{
+	if (pf->hw.func_caps.common_cap.iwarp && pf->num_rdma_msix) {
+		set_bit(ICE_FLAG_IWARP_ENA, pf->flags);
+	}
+}
+
+/**
+ * ice_clear_rdma_cap - disable RDMA in PF flags
+ * @pf: PF struct
+ */
+static inline void ice_clear_rdma_cap(struct ice_pf *pf)
+{
+	clear_bit(ICE_FLAG_IWARP_ENA, pf->flags);
+}
+#endif /* HAVE_NETDEV_UPPER_INFO */
+
+/**
+ * ice_chk_rdma_cap - check the status of RDMA in PF flags
+ * @pf: PF struct
+ */
+static inline bool ice_chk_rdma_cap(struct ice_pf *pf)
+{
+	return test_bit(ICE_FLAG_IWARP_ENA, pf->flags);
+}
 const char *ice_aq_str(enum ice_aq_err aq_err);
 bool ice_is_wol_supported(struct ice_hw *hw);
 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
 			  struct ice_rq_event_info *event);
+void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
 int
 ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
 		    bool is_tun);
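
The capability helpers above pair with the new ICE_FLAG_PLUG_AUX_DEV and
ICE_FLAG_UNPLUG_AUX_DEV flags: code flips the capability, then asks the
service task to (un)plug the auxiliary device. A sketch of that flow
(ice_example_toggle_rdma is hypothetical, and ice_set/clear_rdma_cap are only
compiled under HAVE_NETDEV_UPPER_INFO):

static void ice_example_toggle_rdma(struct ice_pf *pf, bool enable)
{
	if (enable) {
		ice_set_rdma_cap(pf);
		set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
	} else {
		ice_clear_rdma_cap(pf);
		set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
	}
}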
@@ -1648,6 +1722,7 @@ int
 ice_ntuple_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
 			     int fltr_idx);
 void ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring);
+void ice_ch_vsi_update_ring_vecs(struct ice_vsi *vsi);
 int ice_open(struct net_device *netdev);
 int ice_open_internal(struct net_device *netdev);
 int ice_stop(struct net_device *netdev);

diff --git a/drivers/thirdparty/ice/ice_acl.c b/drivers/thirdparty/ice/ice_acl.c

@@ -12,7 +12,7 @@
  *
  * Allocate ACL table (indirect 0x0C10)
  */
-enum ice_status
+int
 ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl,
 		     struct ice_sq_cd *cd)
 {
@@ -20,10 +20,10 @@ ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl,
 	struct ice_aq_desc desc;
 
 	if (!tbl->act_pairs_per_entry)
-		return ICE_ERR_PARAM;
+		return -EINVAL;
 
 	if (tbl->act_pairs_per_entry > ICE_AQC_MAX_ACTION_MEMORIES)
-		return ICE_ERR_MAX_LIMIT;
+		return -ENOSPC;
 
 	/* If this is concurrent table, then buffer shall be valid and
 	 * contain DependentAllocIDs, 'num_dependent_alloc_ids' should be valid
@@ -31,10 +31,10 @@ ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl,
 	 */
 	if (tbl->concurr) {
 		if (!tbl->num_dependent_alloc_ids)
-			return ICE_ERR_PARAM;
+			return -EINVAL;
 
 		if (tbl->num_dependent_alloc_ids >
 		    ICE_AQC_MAX_CONCURRENT_ACL_TBL)
-			return ICE_ERR_INVAL_SIZE;
+			return -EINVAL;
 	}
 
 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_alloc_acl_tbl);
@@ -63,7 +63,7 @@ ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl,
  * format is 'struct ice_aqc_acl_generic', pass ptr to that struct
  * as 'buf' and its size as 'buf_size'
  */
-enum ice_status
+int
 ice_aq_dealloc_acl_tbl(struct ice_hw *hw, u16 alloc_id,
 		       struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd)
 {
@@ -77,7 +77,7 @@ ice_aq_dealloc_acl_tbl(struct ice_hw *hw, u16 alloc_id,
 	return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd);
 }
 
-static enum ice_status
+static int
 ice_aq_acl_entry(struct ice_hw *hw, u16 opcode, u8 tcam_idx, u16 entry_idx,
 		 struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd)
 {
@@ -106,7 +106,7 @@ ice_aq_acl_entry(struct ice_hw *hw, u16 opcode, u8 tcam_idx, u16 entry_idx,
  *
  * Program ACL entry (direct 0x0C20)
  */
-enum ice_status
+int
 ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
 			 struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd)
 {
@@ -127,7 +127,7 @@ ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
  * NOTE: Caller of this API to parse 'buf' appropriately since it contains
  * response (key and key invert)
  */
-enum ice_status
+int
 ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
 		       struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd)
 {
@@ -136,7 +136,7 @@ ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
 }
 
 /* Helper function to alloc/dealloc ACL action pair */
-static enum ice_status
+static int
 ice_aq_actpair_a_d(struct ice_hw *hw, u16 opcode, u16 alloc_id,
 		   struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd)
 {
@@ -162,7 +162,7 @@ ice_aq_actpair_a_d(struct ice_hw *hw, u16 opcode, u16 alloc_id,
  * This command doesn't need and doesn't have its own command buffer
  * but for response format is as specified in 'struct ice_aqc_acl_generic'
  */
-enum ice_status
+int
 ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id,
 		     struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd)
 {
@@ -179,7 +179,7 @@ ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id,
  *
  * Deallocate ACL actionpair (direct 0x0C13)
  */
-enum ice_status
+int
 ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id,
 		       struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd)
 {
@@ -188,7 +188,7 @@ ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id,
 }
 
 /* Helper function to program/query ACL action pair */
-static enum ice_status
+static int
 ice_aq_actpair_p_q(struct ice_hw *hw, u16 opcode, u8 act_mem_idx,
 		   u16 act_entry_idx, struct ice_aqc_actpair *buf,
 		   struct ice_sq_cd *cd)
@@ -218,7 +218,7 @@ ice_aq_actpair_p_q(struct ice_hw *hw, u16 opcode, u8 act_mem_idx,
  *
  * Program action entries (indirect 0x0C1C)
  */
-enum ice_status
+int
 ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
 		       struct ice_aqc_actpair *buf, struct ice_sq_cd *cd)
 {
@@ -236,7 +236,7 @@ ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
  *
  * Query ACL actionpair (indirect 0x0C25)
  */
-enum ice_status
+int
 ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
 		     struct ice_aqc_actpair *buf, struct ice_sq_cd *cd)
 {
@@ -252,7 +252,7 @@ ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
  * De-allocate ACL resources (direct 0x0C1A). Used by SW to release all the
 * resources allocated for it using a single command
 */
-enum ice_status ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd)
+int ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd)
 {
 	struct ice_aq_desc desc;
@@ -271,7 +271,7 @@ int ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd)
  *
  * This function sends ACL profile commands
  */
-static enum ice_status
+static int
 ice_acl_prof_aq_send(struct ice_hw *hw, u16 opc, u8 prof_id,
 		     struct ice_aqc_acl_prof_generic_frmt *buf,
 		     struct ice_sq_cd *cd)
@@ -295,7 +295,7 @@ ice_acl_prof_aq_send(struct ice_hw *hw, u16 opc, u8 prof_id,
  *
  * Program ACL profile extraction (indirect 0x0C1D)
  */
-enum ice_status
+int
 ice_prgm_acl_prof_xtrct(struct ice_hw *hw, u8 prof_id,
 			struct ice_aqc_acl_prof_generic_frmt *buf,
 			struct ice_sq_cd *cd)
@@ -313,7 +313,7 @@ ice_prgm_acl_prof_xtrct(struct ice_hw *hw, u8 prof_id,
  *
  * Query ACL profile (indirect 0x0C21)
  */
-enum ice_status
+int
 ice_query_acl_prof(struct ice_hw *hw, u8 prof_id,
 		   struct ice_aqc_acl_prof_generic_frmt *buf,
 		   struct ice_sq_cd *cd)
@@ -329,12 +329,12 @@ ice_query_acl_prof(struct ice_hw *hw, u8 prof_id,
  * This function checks the counter bank range for counter type and returns
  * success or failure.
  */
-static enum ice_status ice_aq_acl_cntrs_chk_params(struct ice_acl_cntrs *cntrs)
+static int ice_aq_acl_cntrs_chk_params(struct ice_acl_cntrs *cntrs)
 {
-	enum ice_status status = 0;
+	int status = 0;
 
 	if (!cntrs || !cntrs->amount)
-		return ICE_ERR_PARAM;
+		return -EINVAL;
 
 	switch (cntrs->type) {
 	case ICE_AQC_ACL_CNT_TYPE_SINGLE:
@@ -343,18 +343,18 @@ static int ice_aq_acl_cntrs_chk_params(struct ice_acl_cntrs *cntrs)
 		 * shall be 0-3.
 		 */
 		if (cntrs->bank > ICE_AQC_ACL_MAX_CNT_SINGLE)
-			status = ICE_ERR_OUT_OF_RANGE;
+			status = -EIO;
 		break;
 	case ICE_AQC_ACL_CNT_TYPE_DUAL:
 		/* Pair counter type - counts number of bytes and packets
 		 * The valid values for byte/packet counter duals shall be 0-1
 		 */
 		if (cntrs->bank > ICE_AQC_ACL_MAX_CNT_DUAL)
-			status = ICE_ERR_OUT_OF_RANGE;
+			status = -EIO;
 		break;
 	default:
 		/* Unspecified counter type - Invalid or error */
-		status = ICE_ERR_PARAM;
+		status = -EINVAL;
 	}
 
 	return status;
@@ -372,14 +372,14 @@ static int ice_aq_acl_cntrs_chk_params(struct ice_acl_cntrs *cntrs)
  * unsuccessful if returned counter value is invalid. In this case it returns
  * an error otherwise success.
  */
-enum ice_status
+int
 ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs,
 		       struct ice_sq_cd *cd)
 {
 	struct ice_aqc_acl_alloc_counters *cmd;
 	u16 first_cntr, last_cntr;
 	struct ice_aq_desc desc;
-	enum ice_status status;
+	int status;
 
 	/* check for invalid params */
 	status = ice_aq_acl_cntrs_chk_params(cntrs);
@@ -396,7 +396,7 @@ ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs,
 		last_cntr = le16_to_cpu(cmd->ops.resp.last_counter);
 		if (first_cntr == ICE_AQC_ACL_ALLOC_CNT_INVAL ||
 		    last_cntr == ICE_AQC_ACL_ALLOC_CNT_INVAL)
-			return ICE_ERR_OUT_OF_RANGE;
+			return -EIO;
 		cntrs->first_cntr = first_cntr;
 		cntrs->last_cntr = last_cntr;
 	}
@@ -411,13 +411,13 @@ ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs,
  *
 * De-allocate ACL counters (direct 0x0C17)
 */
-enum ice_status
+int
 ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs,
 			 struct ice_sq_cd *cd)
 {
 	struct ice_aqc_acl_dealloc_counters *cmd;
 	struct ice_aq_desc desc;
-	enum ice_status status;
+	int status;
 
 	/* check for invalid params */
 	status = ice_aq_acl_cntrs_chk_params(cntrs);
@@ -433,7 +433,6 @@ ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs,
 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 }
 
-
 /**
  * ice_prog_acl_prof_ranges - program ACL profile ranges
  * @hw: pointer to the HW struct
@@ -443,7 +442,7 @@ ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs,
  *
  * Program ACL profile ranges (indirect 0x0C1E)
  */
-enum ice_status
+int
 ice_prog_acl_prof_ranges(struct ice_hw *hw, u8 prof_id,
 			 struct ice_aqc_acl_profile_ranges *buf,
 			 struct ice_sq_cd *cd)
@@ -466,7 +465,7 @@ ice_prog_acl_prof_ranges(struct ice_hw *hw, u8 prof_id,
  *
 * Query ACL profile ranges (indirect 0x0C22)
 */
-enum ice_status
+int
 ice_query_acl_prof_ranges(struct ice_hw *hw, u8 prof_id,
 			  struct ice_aqc_acl_profile_ranges *buf,
 			  struct ice_sq_cd *cd)
@@ -488,16 +487,16 @@ ice_query_acl_prof_ranges(struct ice_hw *hw, u8 prof_id,
  *
 * Allocate ACL scenario (indirect 0x0C14)
 */
-enum ice_status
+int
 ice_aq_alloc_acl_scen(struct ice_hw *hw, u16 *scen_id,
 		      struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd)
 {
 	struct ice_aqc_acl_alloc_scen *cmd;
 	struct ice_aq_desc desc;
-	enum ice_status status;
+	int status;
 
 	if (!scen_id)
-		return ICE_ERR_PARAM;
+		return -EINVAL;
 
 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_alloc_acl_scen);
 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -518,7 +517,7 @@ ice_aq_alloc_acl_scen(struct ice_hw *hw, u16 *scen_id,
  *
 * Deallocate ACL scenario (direct 0x0C15)
 */
-enum ice_status
+int
 ice_aq_dealloc_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_sq_cd *cd)
 {
 	struct ice_aqc_acl_dealloc_scen *cmd;
@@ -541,7 +540,7 @@ ice_aq_dealloc_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_sq_cd *cd)
  *
 * Calls update or query ACL scenario
 */
-static enum ice_status
+static int
 ice_aq_update_query_scen(struct ice_hw *hw, u16 opcode, u16 scen_id,
 			 struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd)
 {
@@ -566,7 +565,7 @@ ice_aq_update_query_scen(struct ice_hw *hw, u16 opcode, u16 scen_id,
  *
 * Update ACL scenario (indirect 0x0C1B)
 */
-enum ice_status
+int
 ice_aq_update_acl_scen(struct ice_hw *hw, u16 scen_id,
 		       struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd)
 {
@@ -583,7 +582,7 @@ ice_aq_update_acl_scen(struct ice_hw *hw, u16 scen_id,
  *
 * Query ACL scenario (indirect 0x0C23)
 */
-enum ice_status
+int
 ice_aq_query_acl_scen(struct ice_hw *hw, u16 scen_id,
 		      struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd)
 {
@ -125,78 +125,78 @@ struct ice_acl_cntrs {
u16 last_cntr; u16 last_cntr;
}; };
enum ice_status int
ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params); ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params);
enum ice_status ice_acl_destroy_tbl(struct ice_hw *hw); int ice_acl_destroy_tbl(struct ice_hw *hw);
enum ice_status int
ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries, ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries,
u16 *scen_id); u16 *scen_id);
enum ice_status int
ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl, ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl,
struct ice_sq_cd *cd); struct ice_sq_cd *cd);
enum ice_status int
ice_aq_dealloc_acl_tbl(struct ice_hw *hw, u16 alloc_id, ice_aq_dealloc_acl_tbl(struct ice_hw *hw, u16 alloc_id,
struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd); struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd);
enum ice_status int
ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx, ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd); struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd);
enum ice_status int
ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx, ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd); struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd);
enum ice_status int
ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id, ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id,
struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd); struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd);
enum ice_status int
ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id, ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id,
struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd); struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd);
enum ice_status int
ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx, ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
struct ice_aqc_actpair *buf, struct ice_sq_cd *cd); struct ice_aqc_actpair *buf, struct ice_sq_cd *cd);
enum ice_status int
ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx, ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
struct ice_aqc_actpair *buf, struct ice_sq_cd *cd); struct ice_aqc_actpair *buf, struct ice_sq_cd *cd);
enum ice_status ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd); int ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd);
enum ice_status int
ice_prgm_acl_prof_xtrct(struct ice_hw *hw, u8 prof_id, ice_prgm_acl_prof_xtrct(struct ice_hw *hw, u8 prof_id,
struct ice_aqc_acl_prof_generic_frmt *buf, struct ice_aqc_acl_prof_generic_frmt *buf,
struct ice_sq_cd *cd); struct ice_sq_cd *cd);
enum ice_status int
ice_query_acl_prof(struct ice_hw *hw, u8 prof_id, ice_query_acl_prof(struct ice_hw *hw, u8 prof_id,
struct ice_aqc_acl_prof_generic_frmt *buf, struct ice_aqc_acl_prof_generic_frmt *buf,
struct ice_sq_cd *cd); struct ice_sq_cd *cd);
enum ice_status int
ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs,
struct ice_sq_cd *cd); struct ice_sq_cd *cd);
enum ice_status int
ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs,
struct ice_sq_cd *cd); struct ice_sq_cd *cd);
enum ice_status int
ice_prog_acl_prof_ranges(struct ice_hw *hw, u8 prof_id, ice_prog_acl_prof_ranges(struct ice_hw *hw, u8 prof_id,
struct ice_aqc_acl_profile_ranges *buf, struct ice_aqc_acl_profile_ranges *buf,
struct ice_sq_cd *cd); struct ice_sq_cd *cd);
enum ice_status int
ice_query_acl_prof_ranges(struct ice_hw *hw, u8 prof_id, ice_query_acl_prof_ranges(struct ice_hw *hw, u8 prof_id,
struct ice_aqc_acl_profile_ranges *buf, struct ice_aqc_acl_profile_ranges *buf,
struct ice_sq_cd *cd); struct ice_sq_cd *cd);
enum ice_status int
ice_aq_alloc_acl_scen(struct ice_hw *hw, u16 *scen_id, ice_aq_alloc_acl_scen(struct ice_hw *hw, u16 *scen_id,
struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd); struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd);
enum ice_status int
ice_aq_dealloc_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_sq_cd *cd); ice_aq_dealloc_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_sq_cd *cd);
enum ice_status int
ice_aq_update_acl_scen(struct ice_hw *hw, u16 scen_id, ice_aq_update_acl_scen(struct ice_hw *hw, u16 scen_id,
struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd); struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd);
enum ice_status int
ice_aq_query_acl_scen(struct ice_hw *hw, u16 scen_id, ice_aq_query_acl_scen(struct ice_hw *hw, u16 scen_id,
struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd); struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd);
enum ice_status int
ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen, ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen,
enum ice_acl_entry_prio prio, u8 *keys, u8 *inverts, enum ice_acl_entry_prio prio, u8 *keys, u8 *inverts,
struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx); struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx);
enum ice_status int
ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen, ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen,
struct ice_acl_act_entry *acts, u8 acts_cnt, u16 entry_idx); struct ice_acl_act_entry *acts, u8 acts_cnt, u16 entry_idx);
enum ice_status int
ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx); ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx);
bool ice_is_acl_empty(struct ice_hw *hw); bool ice_is_acl_empty(struct ice_hw *hw);
#endif /* _ICE_ACL_H_ */ #endif /* _ICE_ACL_H_ */
@ -4,12 +4,11 @@
#include "ice_acl.h" #include "ice_acl.h"
#include "ice_flow.h" #include "ice_flow.h"
/* Determine the TCAM index of entry 'e' within the ACL table */ /* Determine the TCAM index of entry 'e' within the ACL table */
#define ICE_ACL_TBL_TCAM_IDX(e) ((e) / ICE_AQC_ACL_TCAM_DEPTH) #define ICE_ACL_TBL_TCAM_IDX(e) ((u8)((e) / ICE_AQC_ACL_TCAM_DEPTH))
/* Determine the entry index within the TCAM */ /* Determine the entry index within the TCAM */
#define ICE_ACL_TBL_TCAM_ENTRY_IDX(e) ((e) % ICE_AQC_ACL_TCAM_DEPTH) #define ICE_ACL_TBL_TCAM_ENTRY_IDX(e) ((u16)((e) % ICE_AQC_ACL_TCAM_DEPTH))
#define ICE_ACL_SCEN_ENTRY_INVAL 0xFFFF #define ICE_ACL_SCEN_ENTRY_INVAL 0xFFFF
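A quick worked example of the two index macros above, assuming the driver's TCAM depth (ICE_AQC_ACL_TCAM_DEPTH) is 512 entries (value assumed, not shown in this hunk); the casts added by the patch make the results match the u8/u16 variables they are assigned to:

/* Assuming ICE_AQC_ACL_TCAM_DEPTH == 512:
 * entry 1300 -> ICE_ACL_TBL_TCAM_IDX(1300)       == 2   (u8,  1300 / 512)
 *            -> ICE_ACL_TBL_TCAM_ENTRY_IDX(1300) == 276 (u16, 1300 % 512)
 */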
@ -74,14 +73,14 @@ ice_acl_scen_assign_entry_idx(struct ice_acl_scen *scen,
* *
* To mark an entry available in scenario * To mark an entry available in scenario
*/ */
static enum ice_status static int
ice_acl_scen_free_entry_idx(struct ice_acl_scen *scen, u16 idx) ice_acl_scen_free_entry_idx(struct ice_acl_scen *scen, u16 idx)
{ {
if (idx >= scen->num_entry) if (idx >= scen->num_entry)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
if (!test_and_clear_bit(idx, scen->entry_bitmap)) if (!test_and_clear_bit(idx, scen->entry_bitmap))
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
return 0; return 0;
} }
@ -141,18 +140,18 @@ static u16 ice_acl_tbl_calc_end_idx(u16 start, u16 num_entries, u16 width)
* *
* Initialize the ACL table by invalidating TCAM entries and action pairs. * Initialize the ACL table by invalidating TCAM entries and action pairs.
*/ */
static enum ice_status ice_acl_init_tbl(struct ice_hw *hw) static int ice_acl_init_tbl(struct ice_hw *hw)
{ {
struct ice_aqc_actpair act_buf; struct ice_aqc_actpair act_buf;
struct ice_aqc_acl_data buf; struct ice_aqc_acl_data buf;
enum ice_status status = 0;
struct ice_acl_tbl *tbl; struct ice_acl_tbl *tbl;
int status = 0;
u8 tcam_idx, i; u8 tcam_idx, i;
u16 idx; u16 idx;
tbl = hw->acl_tbl; tbl = hw->acl_tbl;
if (!tbl) if (!tbl)
return ICE_ERR_CFG; return -EIO;
memset(&buf, 0, sizeof(buf)); memset(&buf, 0, sizeof(buf));
memset(&act_buf, 0, sizeof(act_buf)); memset(&act_buf, 0, sizeof(act_buf));
@ -251,10 +250,8 @@ ice_acl_assign_act_mems_to_tcam(struct ice_acl_tbl *tbl, u8 cur_tcam,
*/ */
static void ice_acl_divide_act_mems_to_tcams(struct ice_acl_tbl *tbl) static void ice_acl_divide_act_mems_to_tcams(struct ice_acl_tbl *tbl)
{ {
u16 num_cscd, stack_level, stack_idx, min_act_mem; u16 num_cscd, stack_level, stack_idx, max_idx_to_get_extra;
u8 tcam_idx = tbl->first_tcam; u8 min_act_mem, tcam_idx = tbl->first_tcam, mem_idx = 0;
u16 max_idx_to_get_extra;
u8 mem_idx = 0;
/* Determine number of stacked TCAMs */ /* Determine number of stacked TCAMs */
stack_level = DIV_ROUND_UP(tbl->info.depth, ICE_AQC_ACL_TCAM_DEPTH); stack_level = DIV_ROUND_UP(tbl->info.depth, ICE_AQC_ACL_TCAM_DEPTH);
@ -303,20 +300,20 @@ static void ice_acl_divide_act_mems_to_tcams(struct ice_acl_tbl *tbl)
* values for the size of the table, but this will need to grow as more flow * values for the size of the table, but this will need to grow as more flow
* entries are added by the user level. * entries are added by the user level.
*/ */
enum ice_status int
ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params) ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params)
{ {
u16 width, depth, first_e, last_e, i; u16 width, depth, first_e, last_e, i;
struct ice_aqc_acl_generic *resp_buf; struct ice_aqc_acl_generic *resp_buf;
struct ice_acl_alloc_tbl tbl_alloc; struct ice_acl_alloc_tbl tbl_alloc;
struct ice_acl_tbl *tbl; struct ice_acl_tbl *tbl;
enum ice_status status; int status;
if (hw->acl_tbl) if (hw->acl_tbl)
return ICE_ERR_ALREADY_EXISTS; return -EEXIST;
if (!params) if (!params)
return ICE_ERR_PARAM; return -EINVAL;
/* round up the width to the next TCAM width boundary. */ /* round up the width to the next TCAM width boundary. */
width = roundup(params->width, (u16)ICE_AQC_ACL_KEY_WIDTH_BYTES); width = roundup(params->width, (u16)ICE_AQC_ACL_KEY_WIDTH_BYTES);
@ -324,7 +321,8 @@ ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params)
depth = ALIGN(params->depth, ICE_ACL_ENTRY_ALLOC_UNIT); depth = ALIGN(params->depth, ICE_ACL_ENTRY_ALLOC_UNIT);
if (params->entry_act_pairs < width / ICE_AQC_ACL_KEY_WIDTH_BYTES) { if (params->entry_act_pairs < width / ICE_AQC_ACL_KEY_WIDTH_BYTES) {
params->entry_act_pairs = width / ICE_AQC_ACL_KEY_WIDTH_BYTES; params->entry_act_pairs =
(u8)(width / ICE_AQC_ACL_KEY_WIDTH_BYTES);
if (params->entry_act_pairs > ICE_AQC_TBL_MAX_ACTION_PAIRS) if (params->entry_act_pairs > ICE_AQC_TBL_MAX_ACTION_PAIRS)
params->entry_act_pairs = ICE_AQC_TBL_MAX_ACTION_PAIRS; params->entry_act_pairs = ICE_AQC_TBL_MAX_ACTION_PAIRS;
@ -333,7 +331,7 @@ ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params)
/* Validate that width*depth will not exceed the TCAM limit */ /* Validate that width*depth will not exceed the TCAM limit */
if ((DIV_ROUND_UP(depth, ICE_AQC_ACL_TCAM_DEPTH) * if ((DIV_ROUND_UP(depth, ICE_AQC_ACL_TCAM_DEPTH) *
(width / ICE_AQC_ACL_KEY_WIDTH_BYTES)) > ICE_AQC_ACL_SLICES) (width / ICE_AQC_ACL_KEY_WIDTH_BYTES)) > ICE_AQC_ACL_SLICES)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
memset(&tbl_alloc, 0, sizeof(tbl_alloc)); memset(&tbl_alloc, 0, sizeof(tbl_alloc));
tbl_alloc.width = width; tbl_alloc.width = width;
@ -364,7 +362,7 @@ ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params)
tbl = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tbl), GFP_KERNEL); tbl = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tbl), GFP_KERNEL);
if (!tbl) { if (!tbl) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto out; goto out;
} }
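The width/depth validation earlier in this function bounds the product of stacked TCAMs and cascaded slices. A worked example, hedging the constants as ICE_AQC_ACL_TCAM_DEPTH == 512, ICE_AQC_ACL_KEY_WIDTH_BYTES == 5 and ICE_AQC_ACL_SLICES == 16 (values assumed, not shown in these hunks):

/* width = 10 bytes (2 slices), depth = 2048 (4 stacked TCAMs):
 *   DIV_ROUND_UP(2048, 512) * (10 / 5) = 8 slices  -> fits
 * width = 20 bytes (4 slices), depth = 3072 (6 stacked TCAMs):
 *   DIV_ROUND_UP(3072, 512) * (20 / 5) = 24 slices -> -ENOSPC
 */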
@ -422,7 +420,7 @@ out:
* @hw: pointer to the hardware structure * @hw: pointer to the hardware structure
* @req: info of partition being allocated * @req: info of partition being allocated
*/ */
static enum ice_status static int
ice_acl_alloc_partition(struct ice_hw *hw, struct ice_acl_scen *req) ice_acl_alloc_partition(struct ice_hw *hw, struct ice_acl_scen *req)
{ {
u16 start = 0, cnt = 0, off = 0; u16 start = 0, cnt = 0, off = 0;
@ -435,7 +433,7 @@ ice_acl_alloc_partition(struct ice_hw *hw, struct ice_acl_scen *req)
/* Check if we have enough TCAMs to accommodate the width */ /* Check if we have enough TCAMs to accommodate the width */
if (width > hw->acl_tbl->last_tcam - hw->acl_tbl->first_tcam + 1) if (width > hw->acl_tbl->last_tcam - hw->acl_tbl->first_tcam + 1)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
/* Number of entries must be multiple of ICE_ACL_ENTRY_ALLOC_UNIT's */ /* Number of entries must be multiple of ICE_ACL_ENTRY_ALLOC_UNIT's */
r_entries = ALIGN(req->num_entry, ICE_ACL_ENTRY_ALLOC_UNIT); r_entries = ALIGN(req->num_entry, ICE_ACL_ENTRY_ALLOC_UNIT);
@ -546,7 +544,7 @@ ice_acl_alloc_partition(struct ice_hw *hw, struct ice_acl_scen *req)
} }
} while (!done); } while (!done);
return cnt >= r_entries ? ICE_SUCCESS : ICE_ERR_MAX_LIMIT; return cnt >= r_entries ? 0 : -ENOSPC;
} }
/** /**
@ -584,7 +582,7 @@ ice_acl_fill_tcam_select(struct ice_aqc_acl_scen *scen_buf,
*/ */
for (j = 0; j < ICE_AQC_ACL_KEY_WIDTH_BYTES; j++) { for (j = 0; j < ICE_AQC_ACL_KEY_WIDTH_BYTES; j++) {
/* PKT DIR uses the 1st location of Byte Selection Base: + 1 */ /* PKT DIR uses the 1st location of Byte Selection Base: + 1 */
u8 val = ICE_AQC_ACL_BYTE_SEL_BASE + 1 + idx; u8 val = (u8)(ICE_AQC_ACL_BYTE_SEL_BASE + 1 + idx);
if (tcam_idx_in_cascade == cascade_cnt - 1) { if (tcam_idx_in_cascade == cascade_cnt - 1) {
if (j == ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM) if (j == ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM)
@ -735,21 +733,21 @@ ice_acl_commit_partition(struct ice_hw *hw, struct ice_acl_scen *scen,
* @num_entries: number of entries to be allocated for the scenario * @num_entries: number of entries to be allocated for the scenario
* @scen_id: holds returned scenario ID if successful * @scen_id: holds returned scenario ID if successful
*/ */
enum ice_status int
ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries, ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries,
u16 *scen_id) u16 *scen_id)
{ {
u8 cascade_cnt, first_tcam, last_tcam, i, k; u8 cascade_cnt, first_tcam, last_tcam, i, k;
struct ice_aqc_acl_scen scen_buf; struct ice_aqc_acl_scen scen_buf;
struct ice_acl_scen *scen; struct ice_acl_scen *scen;
enum ice_status status; int status;
if (!hw->acl_tbl) if (!hw->acl_tbl)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
scen = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*scen), GFP_KERNEL); scen = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*scen), GFP_KERNEL);
if (!scen) if (!scen)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
scen->start = hw->acl_tbl->first_entry; scen->start = hw->acl_tbl->first_entry;
scen->width = ICE_AQC_ACL_KEY_WIDTH_BYTES * scen->width = ICE_AQC_ACL_KEY_WIDTH_BYTES *
@ -789,7 +787,7 @@ ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries,
/* set the START_SET bit at the beginning of the stack */ /* set the START_SET bit at the beginning of the stack */
scen_buf.tcam_cfg[k].start_cmp_set |= ICE_AQC_ACL_ALLOC_SCE_START_SET; scen_buf.tcam_cfg[k].start_cmp_set |= ICE_AQC_ACL_ALLOC_SCE_START_SET;
while (k <= last_tcam) { while (k <= last_tcam) {
u8 last_tcam_idx_cascade = cascade_cnt + k - 1; u16 last_tcam_idx_cascade = cascade_cnt + k - 1;
/* set start_cmp for the first cascaded TCAM */ /* set start_cmp for the first cascaded TCAM */
scen_buf.tcam_cfg[k].start_cmp_set |= scen_buf.tcam_cfg[k].start_cmp_set |=
@ -842,14 +840,14 @@ out:
* @hw: pointer to the HW struct * @hw: pointer to the HW struct
* @scen_id: ID of the remove scenario * @scen_id: ID of the remove scenario
*/ */
static enum ice_status ice_acl_destroy_scen(struct ice_hw *hw, u16 scen_id) static int ice_acl_destroy_scen(struct ice_hw *hw, u16 scen_id)
{ {
struct ice_acl_scen *scen, *tmp_scen; struct ice_acl_scen *scen, *tmp_scen;
struct ice_flow_prof *p, *tmp; struct ice_flow_prof *p, *tmp;
enum ice_status status; int status;
if (!hw->acl_tbl) if (!hw->acl_tbl)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
/* Remove profiles that use "scen_id" scenario */ /* Remove profiles that use "scen_id" scenario */
list_for_each_entry_safe(p, tmp, &hw->fl_profs[ICE_BLK_ACL], l_entry) list_for_each_entry_safe(p, tmp, &hw->fl_profs[ICE_BLK_ACL], l_entry)
@ -885,16 +883,16 @@ static enum ice_status ice_acl_destroy_scen(struct ice_hw *hw, u16 scen_id)
* ice_acl_destroy_tbl - Destroy a previously created LEM table for ACL * ice_acl_destroy_tbl - Destroy a previously created LEM table for ACL
* @hw: pointer to the HW struct * @hw: pointer to the HW struct
*/ */
enum ice_status ice_acl_destroy_tbl(struct ice_hw *hw) int ice_acl_destroy_tbl(struct ice_hw *hw)
{ {
struct ice_acl_scen *pos_scen, *tmp_scen; struct ice_acl_scen *pos_scen, *tmp_scen;
struct ice_aqc_acl_generic resp_buf; struct ice_aqc_acl_generic resp_buf;
struct ice_aqc_acl_scen buf; struct ice_aqc_acl_scen buf;
enum ice_status status; int status;
u8 i; u8 i;
if (!hw->acl_tbl) if (!hw->acl_tbl)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
/* Mark all the created scenario's TCAM to stop the packet lookup and /* Mark all the created scenario's TCAM to stop the packet lookup and
* delete them afterward * delete them afterward
@ -962,23 +960,23 @@ enum ice_status ice_acl_destroy_tbl(struct ice_hw *hw)
* The "keys" and "inverts" buffers must be of the size which is the same as * The "keys" and "inverts" buffers must be of the size which is the same as
* the scenario's width * the scenario's width
*/ */
enum ice_status int
ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen, ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen,
enum ice_acl_entry_prio prio, u8 *keys, u8 *inverts, enum ice_acl_entry_prio prio, u8 *keys, u8 *inverts,
struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx) struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx)
{ {
u8 i, entry_tcam, num_cscd, offset;
struct ice_aqc_acl_data buf; struct ice_aqc_acl_data buf;
enum ice_status status = 0; u8 entry_tcam, offset;
u16 idx; u16 i, num_cscd, idx;
int status = 0;
if (!scen) if (!scen)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
*entry_idx = ice_acl_scen_assign_entry_idx(scen, prio); *entry_idx = ice_acl_scen_assign_entry_idx(scen, prio);
if (*entry_idx >= scen->num_entry) { if (*entry_idx >= scen->num_entry) {
*entry_idx = 0; *entry_idx = 0;
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
} }
/* Determine number of cascaded TCAMs */ /* Determine number of cascaded TCAMs */
@ -999,7 +997,7 @@ ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen,
* be programmed first; the TCAM entry of the leftmost TCAM * be programmed first; the TCAM entry of the leftmost TCAM
* should be programmed last. * should be programmed last.
*/ */
offset = num_cscd - i - 1; offset = (u8)(num_cscd - i - 1);
memcpy(&buf.entry_key.val, memcpy(&buf.entry_key.val,
&keys[offset * sizeof(buf.entry_key.val)], &keys[offset * sizeof(buf.entry_key.val)],
sizeof(buf.entry_key.val)); sizeof(buf.entry_key.val));
@ -1037,18 +1035,17 @@ out:
* *
* Program a scenario's action memory * Program a scenario's action memory
*/ */
enum ice_status int
ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen, ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen,
struct ice_acl_act_entry *acts, u8 acts_cnt, struct ice_acl_act_entry *acts, u8 acts_cnt,
u16 entry_idx) u16 entry_idx)
{ {
u8 entry_tcam, num_cscd, i, actx_idx = 0; u16 idx, entry_tcam, num_cscd, i, actx_idx = 0;
struct ice_aqc_actpair act_buf; struct ice_aqc_actpair act_buf;
enum ice_status status = 0; int status = 0;
u16 idx;
if (entry_idx >= scen->num_entry) if (entry_idx >= scen->num_entry)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
memset(&act_buf, 0, sizeof(act_buf)); memset(&act_buf, 0, sizeof(act_buf));
@ -1085,7 +1082,7 @@ ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen,
} }
if (!status && actx_idx < acts_cnt) if (!status && actx_idx < acts_cnt)
status = ICE_ERR_MAX_LIMIT; status = -ENOSPC;
return status; return status;
} }
@ -1096,23 +1093,23 @@ ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen,
* @scen: scenario to remove the entry from * @scen: scenario to remove the entry from
* @entry_idx: the scenario-relative index of the flow entry being removed * @entry_idx: the scenario-relative index of the flow entry being removed
*/ */
enum ice_status int
ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx) ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx)
{ {
struct ice_aqc_actpair act_buf; struct ice_aqc_actpair act_buf;
struct ice_aqc_acl_data buf; struct ice_aqc_acl_data buf;
u8 entry_tcam, num_cscd, i; u16 num_cscd, idx, i;
enum ice_status status = 0; int status = 0;
u16 idx; u8 entry_tcam;
if (!scen) if (!scen)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
if (entry_idx >= scen->num_entry) if (entry_idx >= scen->num_entry)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
if (!test_bit(entry_idx, scen->entry_bitmap)) if (!test_bit(entry_idx, scen->entry_bitmap))
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
/* Determine number of cascaded TCAMs */ /* Determine number of cascaded TCAMs */
num_cscd = DIV_ROUND_UP(scen->width, ICE_AQC_ACL_KEY_WIDTH_BYTES); num_cscd = DIV_ROUND_UP(scen->width, ICE_AQC_ACL_KEY_WIDTH_BYTES);
@ -1123,8 +1120,8 @@ ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx)
/* invalidate the flow entry */ /* invalidate the flow entry */
memset(&buf, 0, sizeof(buf)); memset(&buf, 0, sizeof(buf));
for (i = 0; i < num_cscd; i++) { for (i = 0; i < num_cscd; i++) {
status = ice_aq_program_acl_entry(hw, entry_tcam + i, idx, &buf, status = ice_aq_program_acl_entry(hw, (u8)(entry_tcam + i),
NULL); idx, &buf, NULL);
if (status) if (status)
ice_debug(hw, ICE_DBG_ACL, "AQ program ACL entry failed status: %d\n", ice_debug(hw, ICE_DBG_ACL, "AQ program ACL entry failed status: %d\n",
status); status);
@ -3,7 +3,6 @@
/* ACL support for ice */ /* ACL support for ice */
#include "ice.h" #include "ice.h"
#include "ice_lib.h" #include "ice_lib.h"
#include "ice_flow.h" #include "ice_flow.h"
@ -127,7 +126,6 @@ ice_acl_set_ip4_usr_seg(struct ice_flow_seg_info *seg,
return 0; return 0;
} }
/** /**
* ice_acl_check_input_set - Checks that a given ACL input set is valid * ice_acl_check_input_set - Checks that a given ACL input set is valid
* @pf: ice PF structure * @pf: ice PF structure
@ -144,7 +142,6 @@ ice_acl_check_input_set(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp)
struct ice_flow_seg_info *seg; struct ice_flow_seg_info *seg;
enum ice_fltr_ptype fltr_type; enum ice_fltr_ptype fltr_type;
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
enum ice_status status;
struct device *dev; struct device *dev;
int err; int err;
@ -215,12 +212,10 @@ ice_acl_check_input_set(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp)
/* Adding a profile for the given flow specification with no /* Adding a profile for the given flow specification with no
* actions (NULL) and zero actions 0. * actions (NULL) and zero actions 0.
*/ */
status = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX, fltr_type, err = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX, fltr_type, seg, 1,
seg, 1, NULL, 0, &prof); NULL, 0, &prof);
if (status) { if (err)
err = ice_status_to_errno(status);
goto err_exit; goto err_exit;
}
hw_prof->fdir_seg[0] = seg; hw_prof->fdir_seg[0] = seg;
return 0; return 0;
@ -249,7 +244,6 @@ int ice_acl_add_rule_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
struct ice_fd_hw_prof *hw_prof; struct ice_fd_hw_prof *hw_prof;
struct ice_fdir_fltr *input; struct ice_fdir_fltr *input;
enum ice_fltr_ptype flow; enum ice_fltr_ptype flow;
enum ice_status status;
struct device *dev; struct device *dev;
struct ice_pf *pf; struct ice_pf *pf;
struct ice_hw *hw; struct ice_hw *hw;
@ -279,6 +273,13 @@ int ice_acl_add_rule_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
if (ret) if (ret)
goto free_input; goto free_input;
mutex_lock(&hw->fdir_fltr_lock);
if (ice_fdir_is_dup_fltr(hw, input)) {
ret = -EINVAL;
goto release_lock;
}
mutex_unlock(&hw->fdir_fltr_lock);
memset(&acts, 0, sizeof(acts)); memset(&acts, 0, sizeof(acts));
act_cnt = 1; act_cnt = 1;
if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
@ -296,12 +297,11 @@ int ice_acl_add_rule_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
flow = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT); flow = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
hw_prof = hw->acl_prof[flow]; hw_prof = hw->acl_prof[flow];
status = ice_flow_add_entry(hw, ICE_BLK_ACL, flow, fsp->location, ret = ice_flow_add_entry(hw, ICE_BLK_ACL, flow, fsp->location,
vsi->idx, ICE_FLOW_PRIO_NORMAL, input, acts, vsi->idx, ICE_FLOW_PRIO_NORMAL, input, acts,
act_cnt, &entry_h); act_cnt, &entry_h);
if (status) { if (ret) {
dev_err(dev, "Could not add flow entry %d\n", flow); dev_err(dev, "Could not add flow entry %d\n", flow);
ret = ice_status_to_errno(status);
goto free_input; goto free_input;
} }
@ -312,12 +312,14 @@ int ice_acl_add_rule_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
input->acl_fltr = true; input->acl_fltr = true;
/* input struct is added to the HW filter list */ /* input struct is added to the HW filter list */
mutex_lock(&hw->fdir_fltr_lock);
ice_ntuple_update_list_entry(pf, input, fsp->location); ice_ntuple_update_list_entry(pf, input, fsp->location);
return 0; release_lock:
mutex_unlock(&hw->fdir_fltr_lock);
free_input: free_input:
devm_kfree(dev, input); if (ret)
devm_kfree(dev, input);
return ret; return ret;
} }
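Putting the two hunks above together, the patched locking and error flow of ice_acl_add_rule_ethtool() is roughly as follows (a condensed sketch of the new code paths, not the full function):

	/* early duplicate check, added before the entry is programmed */
	mutex_lock(&hw->fdir_fltr_lock);
	if (ice_fdir_is_dup_fltr(hw, input)) {
		ret = -EINVAL;
		goto release_lock;
	}
	mutex_unlock(&hw->fdir_fltr_lock);

	/* ... build actions and program the flow entry ... */

	/* list insertion now happens under the same lock */
	mutex_lock(&hw->fdir_fltr_lock);
	ice_ntuple_update_list_entry(pf, input, fsp->location);
release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
free_input:
	if (ret)
		devm_kfree(dev, input);	/* input is kept on success */

	return ret;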
File diff suppressed because it is too large
@ -2,6 +2,7 @@
/* Copyright (C) 2018-2021, Intel Corporation. */ /* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice.h" #include "ice.h"
#include "ice_irq.h"
/** /**
* ice_is_arfs_active - helper to check is aRFS is active * ice_is_arfs_active - helper to check is aRFS is active
@ -553,7 +554,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
if (!vsi || vsi->type != ICE_VSI_PF) if (!vsi || vsi->type != ICE_VSI_PF)
return; return;
arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST, arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
GFP_KERNEL); GFP_KERNEL);
if (!arfs_fltr_list) if (!arfs_fltr_list)
return; return;
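The kzalloc() to kcalloc() switch above is more than style: kcalloc() checks the element-count multiplication for overflow. A minimal before/after sketch:

/* Before: silent wrap if the multiplication overflows */
arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST,
			 GFP_KERNEL);

/* After: kcalloc() returns NULL when n * size would overflow */
arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
			 GFP_KERNEL);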
@ -610,14 +611,14 @@ void ice_clear_arfs(struct ice_vsi *vsi)
} }
/** /**
* ice_free_cpu_rx_rmap - free setup cpu reverse map * ice_free_cpu_rx_rmap - free setup CPU reverse map
* @vsi: the VSI to be forwarded to * @vsi: the VSI to be forwarded to
*/ */
static void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{ {
struct net_device *netdev; struct net_device *netdev;
if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list) if (!vsi || vsi->type != ICE_VSI_PF)
return; return;
netdev = vsi->netdev; netdev = vsi->netdev;
@ -629,7 +630,7 @@ static void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
} }
/** /**
* ice_set_cpu_rx_rmap - setup cpu reverse map for each queue * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
* @vsi: the VSI to be forwarded to * @vsi: the VSI to be forwarded to
*/ */
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
@ -639,7 +640,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
int base_idx, i; int base_idx, i;
if (!vsi || vsi->type != ICE_VSI_PF) if (!vsi || vsi->type != ICE_VSI_PF)
return -EINVAL; return 0;
pf = vsi->back; pf = vsi->back;
netdev = vsi->netdev; netdev = vsi->netdev;
@ -656,7 +657,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
base_idx = vsi->base_vector; base_idx = vsi->base_vector;
for (i = 0; i < vsi->num_q_vectors; i++) for (i = 0; i < vsi->num_q_vectors; i++)
if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
pf->msix_entries[base_idx + i].vector)) { ice_get_irq_num(pf, base_idx + i))) {
ice_free_cpu_rx_rmap(vsi); ice_free_cpu_rx_rmap(vsi);
return -EINVAL; return -EINVAL;
} }
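ice_get_irq_num() comes from the new ice_irq.c, whose body is not shown in this view; based on the expression it replaces, a plausible sketch (hypothetical, not the driver's actual implementation) is:

/* Hypothetical sketch; the real helper in ice_irq.c may differ. */
int ice_get_irq_num(struct ice_pf *pf, int idx)
{
	if (!pf->msix_entries)
		return -EINVAL;

	return pf->msix_entries[idx].vector;
}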
@ -676,7 +677,6 @@ void ice_remove_arfs(struct ice_pf *pf)
if (!pf_vsi) if (!pf_vsi)
return; return;
ice_free_cpu_rx_rmap(pf_vsi);
ice_clear_arfs(pf_vsi); ice_clear_arfs(pf_vsi);
} }
@ -693,9 +693,5 @@ void ice_rebuild_arfs(struct ice_pf *pf)
return; return;
ice_remove_arfs(pf); ice_remove_arfs(pf);
if (ice_set_cpu_rx_rmap(pf_vsi)) {
dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
return;
}
ice_init_arfs(pf_vsi); ice_init_arfs(pf_vsi);
} }
@ -4,7 +4,7 @@
#ifndef _ICE_ARFS_H_ #ifndef _ICE_ARFS_H_
#define _ICE_ARFS_H_ #define _ICE_ARFS_H_
#include "ice.h" #include "ice_fdir.h"
enum ice_arfs_fltr_state { enum ice_arfs_fltr_state {
ICE_ARFS_INACTIVE, ICE_ARFS_INACTIVE,
@ -45,6 +45,7 @@ int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
u16 rxq_idx, u32 flow_id); u16 rxq_idx, u32 flow_id);
void ice_clear_arfs(struct ice_vsi *vsi); void ice_clear_arfs(struct ice_vsi *vsi);
void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
void ice_init_arfs(struct ice_vsi *vsi); void ice_init_arfs(struct ice_vsi *vsi);
void ice_sync_arfs_fltrs(struct ice_pf *pf); void ice_sync_arfs_fltrs(struct ice_pf *pf);
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi); int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
@ -55,6 +56,7 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
enum ice_fltr_ptype flow_type); enum ice_fltr_ptype flow_type);
#else #else
static inline void ice_clear_arfs(struct ice_vsi *vsi) { } static inline void ice_clear_arfs(struct ice_vsi *vsi) { }
static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { }
static inline void ice_init_arfs(struct ice_vsi *vsi) { } static inline void ice_init_arfs(struct ice_vsi *vsi) { }
static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { } static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { }
static inline void ice_remove_arfs(struct ice_pf *pf) { } static inline void ice_remove_arfs(struct ice_pf *pf) { }
@ -4,7 +4,7 @@
#include "ice_base.h" #include "ice_base.h"
#include "ice_lib.h" #include "ice_lib.h"
#include "ice_dcb_lib.h" #include "ice_dcb_lib.h"
#include "ice_virtchnl_pf.h" #include "ice_sriov.h"
/** /**
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
@ -322,9 +322,10 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
break; break;
case ICE_VSI_VF: case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */ /* Firmware expects vmvf_num to be absolute VF ID */
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break; break;
case ICE_VSI_ADI:
case ICE_VSI_OFFLOAD_MACVLAN: case ICE_VSI_OFFLOAD_MACVLAN:
case ICE_VSI_VMDQ2: case ICE_VSI_VMDQ2:
case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_SWITCHDEV_CTRL:
@ -349,6 +350,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
break; break;
} }
tlan_ctx->quanta_prof_idx = ring->quanta_prof_id;
tlan_ctx->tso_ena = ICE_TX_LEGACY; tlan_ctx->tso_ena = ICE_TX_LEGACY;
tlan_ctx->tso_qnum = pf_q; tlan_ctx->tso_qnum = pf_q;
@ -402,7 +404,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
/* Strip the Ethernet CRC bytes before the packet is posted to host /* Strip the Ethernet CRC bytes before the packet is posted to host
* memory. * memory.
*/ */
rlan_ctx.crcstrip = ring->rx_crc_strip_dis ? 0 : 1; rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);
/* L2TSEL flag defines the reported L2 Tags in the receive descriptor /* L2TSEL flag defines the reported L2 Tags in the receive descriptor
* and it needs to remain 1 for non-DVM capable configurations to not * and it needs to remain 1 for non-DVM capable configurations to not
@ -412,14 +414,25 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
* be stripped in L2TAG1 of the Rx descriptor, which is where VFs will * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
* check for the tag * check for the tag
*/ */
if (ice_is_dvm_ena(hw)) if (ice_is_dvm_ena(hw)) {
if (vsi->type == ICE_VSI_VF && if (vsi->type == ICE_VSI_VF) {
ice_vf_is_port_vlan_ena(&vsi->back->vf[vsi->vf_id])) struct ice_vf *vf = vsi->vf;
rlan_ctx.l2tsel = 1;
else if (vf && ice_vf_is_port_vlan_ena(vf)) {
rlan_ctx.l2tsel = 1;
} else if (!vf) {
WARN(1, "VF VSI %u has NULL VF pointer",
vsi->vsi_num);
rlan_ctx.l2tsel = 0;
} else {
rlan_ctx.l2tsel = 0;
}
} else {
rlan_ctx.l2tsel = 0; rlan_ctx.l2tsel = 0;
else }
} else {
rlan_ctx.l2tsel = 1; rlan_ctx.l2tsel = 1;
}
rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
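The restructured l2tsel logic above collapses to this decision table (derived from the new code; the NULL-VF case also fires a WARN before forcing 0):

 DVM enabled | VSI type | port VLAN     | l2tsel
 yes         | VF       | yes           | 1
 yes         | VF       | no or NULL VF | 0
 yes         | non-VF   | -             | 0
 no          | any      | -             | 1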
@ -454,22 +467,25 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
* setting to 0x03 to ensure profile is programming if prev context is * setting to 0x03 to ensure profile is programming if prev context is
* of same priority * of same priority
*/ */
if (vsi->type != ICE_VSI_VF) switch (vsi->type) {
ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true); case ICE_VSI_ADI:
else case ICE_VSI_VF:
ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3, ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
false); false);
break;
default:
ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
}
/* Absolute queue number out of 2K needs to be passed */ /* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
if (err) { if (err) {
dev_err(ice_pf_to_dev(vsi->back), dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
pf_q, err); pf_q, err);
return -EIO; return -EIO;
} }
if (vsi->type == ICE_VSI_VF) if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_ADI)
return 0; return 0;
/* configure Rx buffer alignment */ /* configure Rx buffer alignment */
@ -499,6 +515,7 @@ int ice_vsi_cfg_rxq(struct ice_ring *ring)
ring->rx_buf_len = ring->vsi->rx_buf_len; ring->rx_buf_len = ring->vsi->rx_buf_len;
#ifdef HAVE_XDP_BUFF_RXQ
#ifdef HAVE_AF_XDP_ZC_SUPPORT #ifdef HAVE_AF_XDP_ZC_SUPPORT
if (ring->vsi->type == ICE_VSI_PF) { if (ring->vsi->type == ICE_VSI_PF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
@ -506,7 +523,7 @@ int ice_vsi_cfg_rxq(struct ice_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index, ring->q_vector->napi.napi_id); ring->q_index, ring->q_vector->napi.napi_id);
ring->xsk_pool = ice_xsk_umem(ring); ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) { if (ring->xsk_pool) {
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
@ -563,11 +580,13 @@ int ice_vsi_cfg_rxq(struct ice_ring *ring)
return err; return err;
} }
#endif /* HAVE_AF_XDP_ZC_SUPPORT */ #endif /* HAVE_AF_XDP_ZC_SUPPORT */
#endif /* HAVE_XDP_BUFF_RXQ */
err = ice_setup_rx_ctx(ring); err = ice_setup_rx_ctx(ring);
if (err) { if (err) {
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n", ice_dev_err_errno(dev, err,
ring->q_index, err); "ice_setup_rx_ctx failed for RxQ %d",
ring->q_index);
return err; return err;
} }
@ -714,8 +733,9 @@ err_out:
while (v_idx--) while (v_idx--)
ice_free_q_vector(vsi, v_idx); ice_free_q_vector(vsi, v_idx);
dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n", ice_dev_err_errno(dev, err,
vsi->num_q_vectors, vsi->vsi_num, err); "Failed to allocate %d q_vector for VSI %d",
vsi->num_q_vectors, vsi->vsi_num);
vsi->num_q_vectors = 0; vsi->num_q_vectors = 0;
return err; return err;
} }
@ -754,9 +774,14 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
struct ice_ring *tx_ring = vsi->tx_rings[q_id]; struct ice_ring *tx_ring = vsi->tx_rings[q_id];
tx_ring->q_vector = q_vector; if (tx_ring) {
tx_ring->next = q_vector->tx.ring; tx_ring->q_vector = q_vector;
q_vector->tx.ring = tx_ring; tx_ring->next = q_vector->tx.ring;
q_vector->tx.ring = tx_ring;
} else {
dev_err(ice_pf_to_dev(vsi->back), "NULL Tx ring found\n");
break;
}
} }
tx_rings_rem -= tx_rings_per_v; tx_rings_rem -= tx_rings_per_v;
@ -771,9 +796,14 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
struct ice_ring *rx_ring = vsi->rx_rings[q_id]; struct ice_ring *rx_ring = vsi->rx_rings[q_id];
rx_ring->q_vector = q_vector; if (rx_ring) {
rx_ring->next = q_vector->rx.ring; rx_ring->q_vector = q_vector;
q_vector->rx.ring = rx_ring; rx_ring->next = q_vector->rx.ring;
q_vector->rx.ring = rx_ring;
} else {
dev_err(ice_pf_to_dev(vsi->back), "NULL Rx ring found\n");
break;
}
} }
rx_rings_rem -= rx_rings_per_v; rx_rings_rem -= rx_rings_per_v;
} }
@ -794,28 +824,28 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
/** /**
* ice_vsi_cfg_txq - Configure single Tx queue * ice_vsi_cfg_txq - Configure single Tx queue
* @vsi: the VSI that queue belongs to * @vsi: the VSI that queue belongs to
* @ring: Tx ring to be configured * @tx_ring: Tx ring to be configured
* @qg_buf: queue group buffer * @qg_buf: queue group buffer
*/ */
int int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
struct ice_aqc_add_tx_qgrp *qg_buf) struct ice_aqc_add_tx_qgrp *qg_buf)
{ {
u8 buf_len = struct_size(qg_buf, txqs, 1); u8 buf_len = struct_size(qg_buf, txqs, 1);
struct ice_tlan_ctx tlan_ctx = { 0 }; struct ice_tlan_ctx tlan_ctx = { 0 };
struct ice_channel *ch = tx_ring->ch;
struct ice_aqc_add_txqs_perq *txq; struct ice_aqc_add_txqs_perq *txq;
struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
enum ice_status status; int status;
u16 pf_q; u16 pf_q;
u8 tc; u8 tc;
/* Configure XPS */ /* Configure XPS */
ice_cfg_xps_tx_ring(ring); ice_cfg_xps_tx_ring(tx_ring);
pf_q = ring->reg_idx; pf_q = tx_ring->reg_idx;
ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); ice_setup_tx_ctx(tx_ring, &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */ /* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
@ -824,10 +854,9 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
/* init queue specific tail reg. It is referred as /* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell. * transmit comm scheduler queue doorbell.
*/ */
ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q); tx_ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);
if (IS_ENABLED(CONFIG_DCB)) if (IS_ENABLED(CONFIG_DCB))
tc = ring->dcb_tc; tc = tx_ring->dcb_tc;
else else
tc = 0; tc = 0;
@ -835,19 +864,22 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
* TC into the VSI Tx ring * TC into the VSI Tx ring
*/ */
if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) if (vsi->type == ICE_VSI_SWITCHDEV_CTRL)
ring->q_handle = ice_eswitch_calc_q_handle(ring); tx_ring->q_handle = ice_eswitch_calc_q_handle(tx_ring);
else else
ring->q_handle = ice_calc_q_handle(vsi, ring, tc); tx_ring->q_handle = ice_calc_q_handle(vsi, tx_ring, tc);
status = (ch ? if (ch)
ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0, status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
ring->q_handle, 1, qg_buf, buf_len, NULL) : tx_ring->q_handle, 1, qg_buf, buf_len,
ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, NULL);
ring->q_handle, 1, qg_buf, buf_len, NULL)); else
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
tx_ring->q_handle, 1, qg_buf, buf_len,
NULL);
if (status) { if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n", dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
ice_stat_str(status)); status);
return -ENODEV; return status;
} }
/* Add Tx Queue TEID into the VSI Tx ring from the /* Add Tx Queue TEID into the VSI Tx ring from the
@ -856,7 +888,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
*/ */
txq = &qg_buf->txqs[0]; txq = &qg_buf->txqs[0];
if (pf_q == le16_to_cpu(txq->txq_id)) if (pf_q == le16_to_cpu(txq->txq_id))
ring->txq_teid = le32_to_cpu(txq->q_teid); tx_ring->txq_teid = le32_to_cpu(txq->q_teid);
return 0; return 0;
} }
@ -971,7 +1003,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
struct ice_q_vector *q_vector; struct ice_q_vector *q_vector;
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
enum ice_status status; int status;
u32 val; u32 val;
/* clear cause_ena bit for disabled queues */ /* clear cause_ena bit for disabled queues */
@ -994,19 +1026,18 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
&txq_meta->q_id, &txq_meta->q_teid, rst_src, &txq_meta->q_id, &txq_meta->q_teid, rst_src,
rel_vmvf_num, NULL); rel_vmvf_num, NULL);
/* if the disable queue command was exercised during an /* If the disable queue command was exercised during an active reset
* active reset flow, ICE_ERR_RESET_ONGOING is returned. * flow, -EBUSY is returned. This is not an error as the reset
* This is not an error as the reset operation disables * operation disables queues at the hardware level anyway.
* queues at the hardware level anyway.
*/ */
if (status == ICE_ERR_RESET_ONGOING) { if (status == -EBUSY) {
dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n"); dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
} else if (status == ICE_ERR_DOES_NOT_EXIST) { } else if (status == -ENOENT) {
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n"); dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) { } else if (status) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n", dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
ice_stat_str(status)); status);
return -ENODEV; return status;
} }
return 0; return 0;
@ -15,7 +15,7 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi); void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi); void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
int int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
struct ice_aqc_add_tx_qgrp *qg_buf); struct ice_aqc_add_tx_qgrp *qg_buf);
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector); void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
void void
drivers/thirdparty/ice/ice_bst_tcam.c (new file, 297 lines)
@ -0,0 +1,297 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice_common.h"
#include "ice_parser_util.h"
#define ICE_BST_TCAM_TABLE_SIZE 256
static void _bst_np_kb_dump(struct ice_hw *hw, struct ice_np_keybuilder *kb)
{
dev_info(ice_hw_to_dev(hw), "next proto key builder:\n");
dev_info(ice_hw_to_dev(hw), "\tops = %d\n", kb->ops);
dev_info(ice_hw_to_dev(hw), "\tstart_or_reg0 = %d\n",
kb->start_or_reg0);
dev_info(ice_hw_to_dev(hw), "\tlen_or_reg1 = %d\n", kb->len_or_reg1);
}
static void _bst_pg_kb_dump(struct ice_hw *hw, struct ice_pg_keybuilder *kb)
{
dev_info(ice_hw_to_dev(hw), "parse graph key builder:\n");
dev_info(ice_hw_to_dev(hw), "\tflag0_ena = %d\n", kb->flag0_ena);
dev_info(ice_hw_to_dev(hw), "\tflag1_ena = %d\n", kb->flag1_ena);
dev_info(ice_hw_to_dev(hw), "\tflag2_ena = %d\n", kb->flag2_ena);
dev_info(ice_hw_to_dev(hw), "\tflag3_ena = %d\n", kb->flag3_ena);
dev_info(ice_hw_to_dev(hw), "\tflag0_idx = %d\n", kb->flag0_idx);
dev_info(ice_hw_to_dev(hw), "\tflag1_idx = %d\n", kb->flag1_idx);
dev_info(ice_hw_to_dev(hw), "\tflag2_idx = %d\n", kb->flag2_idx);
dev_info(ice_hw_to_dev(hw), "\tflag3_idx = %d\n", kb->flag3_idx);
dev_info(ice_hw_to_dev(hw), "\talu_reg_idx = %d\n", kb->alu_reg_idx);
}
static void _bst_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int index)
{
dev_info(ice_hw_to_dev(hw), "alu%d:\n", index);
dev_info(ice_hw_to_dev(hw), "\topc = %d\n", alu->opc);
dev_info(ice_hw_to_dev(hw), "\tsrc_start = %d\n", alu->src_start);
dev_info(ice_hw_to_dev(hw), "\tsrc_len = %d\n", alu->src_len);
dev_info(ice_hw_to_dev(hw), "\tshift_xlate_select = %d\n",
alu->shift_xlate_select);
dev_info(ice_hw_to_dev(hw), "\tshift_xlate_key = %d\n",
alu->shift_xlate_key);
dev_info(ice_hw_to_dev(hw), "\tsrc_reg_id = %d\n", alu->src_reg_id);
dev_info(ice_hw_to_dev(hw), "\tdst_reg_id = %d\n", alu->dst_reg_id);
dev_info(ice_hw_to_dev(hw), "\tinc0 = %d\n", alu->inc0);
dev_info(ice_hw_to_dev(hw), "\tinc1 = %d\n", alu->inc1);
dev_info(ice_hw_to_dev(hw), "\tproto_offset_opc = %d\n",
alu->proto_offset_opc);
dev_info(ice_hw_to_dev(hw), "\tproto_offset = %d\n",
alu->proto_offset);
dev_info(ice_hw_to_dev(hw), "\tbranch_addr = %d\n", alu->branch_addr);
dev_info(ice_hw_to_dev(hw), "\timm = %d\n", alu->imm);
dev_info(ice_hw_to_dev(hw), "\tdst_start = %d\n", alu->dst_start);
dev_info(ice_hw_to_dev(hw), "\tdst_len = %d\n", alu->dst_len);
dev_info(ice_hw_to_dev(hw), "\tflags_extr_imm = %d\n",
alu->flags_extr_imm);
	dev_info(ice_hw_to_dev(hw), "\tflags_start_imm = %d\n",
		 alu->flags_start_imm);
}
/**
 * ice_bst_tcam_dump - dump boost TCAM info
* @hw: pointer to the hardware structure
* @item: boost tcam to dump
*/
void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item)
{
int i;
dev_info(ice_hw_to_dev(hw), "address = %d\n", item->address);
dev_info(ice_hw_to_dev(hw), "key :");
for (i = 0; i < 20; i++)
dev_info(ice_hw_to_dev(hw), "%02x ", item->key[i]);
dev_info(ice_hw_to_dev(hw), "\n");
dev_info(ice_hw_to_dev(hw), "key_inv:");
for (i = 0; i < 20; i++)
dev_info(ice_hw_to_dev(hw), "%02x ", item->key_inv[i]);
dev_info(ice_hw_to_dev(hw), "\n");
dev_info(ice_hw_to_dev(hw), "hit_idx_grp = %d\n", item->hit_idx_grp);
dev_info(ice_hw_to_dev(hw), "pg_pri = %d\n", item->pg_pri);
_bst_np_kb_dump(hw, &item->np_kb);
_bst_pg_kb_dump(hw, &item->pg_kb);
_bst_alu_dump(hw, &item->alu0, 0);
_bst_alu_dump(hw, &item->alu1, 1);
_bst_alu_dump(hw, &item->alu2, 2);
}
/** The function parses a 96-bit ALU entry with the format below:
* BIT 0-5: Opcode (alu->opc)
* BIT 6-13: Source Start (alu->src_start)
* BIT 14-18: Source Length (alu->src_len)
* BIT 19: Shift/Xlate Select (alu->shift_xlate_select)
* BIT 20-23: Shift/Xlate Key (alu->shift_xlate_key)
* BIT 24-30: Source Register ID (alu->src_reg_id)
* BIT 31-37: Dest. Register ID (alu->dst_reg_id)
* BIT 38: Inc0 (alu->inc0)
 * BIT 39: Inc1 (alu->inc1)
 * BIT 40-41: Protocol Offset Opcode (alu->proto_offset_opc)
 * BIT 42-49: Protocol Offset (alu->proto_offset)
 * BIT 50-57: Branch Address (alu->branch_addr)
 * BIT 58-73: Immediate (alu->imm)
 * BIT 74: Dedicated Flags Enable (alu->dedicate_flags_ena)
 * BIT 75-80: Dest. Start (alu->dst_start)
 * BIT 81-86: Dest. Length (alu->dst_len)
 * BIT 87: Flags Extract Imm. (alu->flags_extr_imm)
 * BIT 88-95: Flags Start/Immediate (alu->flags_start_imm)
*
* NOTE: the first 7 bits are skipped as the start bit is not
* byte aligned.
*/
static void _bst_alu_init(struct ice_alu *alu, u8 *data)
{
u64 d64 = *(u64 *)data >> 7;
alu->opc = (enum ice_alu_opcode)(d64 & 0x3f);
alu->src_start = (u8)((d64 >> 6) & 0xff);
alu->src_len = (u8)((d64 >> 14) & 0x1f);
alu->shift_xlate_select = ((d64 >> 19) & 0x1) != 0;
alu->shift_xlate_key = (u8)((d64 >> 20) & 0xf);
alu->src_reg_id = (u8)((d64 >> 24) & 0x7f);
alu->dst_reg_id = (u8)((d64 >> 31) & 0x7f);
alu->inc0 = ((d64 >> 38) & 0x1) != 0;
alu->inc1 = ((d64 >> 39) & 0x1) != 0;
alu->proto_offset_opc = (u8)((d64 >> 40) & 0x3);
alu->proto_offset = (u8)((d64 >> 42) & 0xff);
d64 = *(u64 *)(&data[6]) >> 9;
alu->branch_addr = (u8)(d64 & 0xff);
alu->imm = (u16)((d64 >> 8) & 0xffff);
alu->dedicate_flags_ena = ((d64 >> 24) & 0x1) != 0;
alu->dst_start = (u8)((d64 >> 25) & 0x3f);
alu->dst_len = (u8)((d64 >> 31) & 0x3f);
alu->flags_extr_imm = ((d64 >> 37) & 0x1) != 0;
alu->flags_start_imm = (u8)((d64 >> 38) & 0xff);
}
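Because the entry's start bit is not byte aligned, the `*(u64 *)data >> 7` above realigns the stream before masking. A tiny self-contained illustration of the same shift-and-mask pattern (invented values, plain userspace C for brevity):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t data[8] = { 0 };
	uint64_t d64 = 0;
	int i;

	/* Place opcode 5 (binary 000101) in raw bits 7-12 of the stream */
	data[0] = 0x80;		/* bit 7 = opc bit 0 (1) */
	data[1] = 0x02;		/* bits 8-12 = opc bits 1-5 (00010) */

	for (i = 0; i < 8; i++)	/* assemble little-endian, like *(u64 *) */
		d64 |= (uint64_t)data[i] << (8 * i);

	printf("opc = %llu\n", (unsigned long long)((d64 >> 7) & 0x3f)); /* 5 */
	return 0;
}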
/** The function parses a 35-bit Parse Graph Key Build with the format below:
* BIT 0: Flag 0 Enable (kb->flag0_ena)
* BIT 1-6: Flag 0 Index (kb->flag0_idx)
* BIT 7: Flag 1 Enable (kb->flag1_ena)
* BIT 8-13: Flag 1 Index (kb->flag1_idx)
* BIT 14: Flag 2 Enable (kb->flag2_ena)
* BIT 15-20: Flag 2 Index (kb->flag2_idx)
* BIT 21: Flag 3 Enable (kb->flag3_ena)
* BIT 22-27: Flag 3 Index (kb->flag3_idx)
* BIT 28-34: ALU Register Index (kb->alu_reg_idx)
*/
static void _bst_pgkb_init(struct ice_pg_keybuilder *kb, u64 data)
{
kb->flag0_ena = (data & 0x1) != 0;
kb->flag0_idx = (u8)((data >> 1) & 0x3f);
kb->flag1_ena = ((data >> 7) & 0x1) != 0;
kb->flag1_idx = (u8)((data >> 8) & 0x3f);
kb->flag2_ena = ((data >> 14) & 0x1) != 0;
kb->flag2_idx = (u8)((data >> 15) & 0x3f);
kb->flag3_ena = ((data >> 21) & 0x1) != 0;
kb->flag3_idx = (u8)((data >> 22) & 0x3f);
kb->alu_reg_idx = (u8)((data >> 28) & 0x7f);
}
/** The function parses an 18-bit Next Protocol Key Build with the format below:
* BIT 0-1: Opcode kb->ops
* BIT 2-9: Start / Reg 0 (kb->start_or_reg0)
* BIT 10-17: Length / Reg 1 (kb->len_or_reg1)
*/
static void _bst_npkb_init(struct ice_np_keybuilder *kb, u32 data)
{
kb->ops = (u8)(data & 0x3);
kb->start_or_reg0 = (u8)((data >> 2) & 0xff);
kb->len_or_reg1 = (u8)((data >> 10) & 0xff);
}
/** The function parses a 704-bit Boost TCAM entry with the format below:
* BIT 0-15: Address (ti->address)
* BIT 16-31: reserved
* BIT 32-191: Key (ti->key)
 * BIT 192-351: Key Invert (ti->key_inv)
 * BIT 352-359: Boost Hit Index Group (ti->hit_idx_grp)
 * BIT 360-361: PG Priority (ti->pg_pri)
 * BIT 362-379: Next Proto Key Build (ti->np_kb)
 * BIT 380-414: PG Key Build (ti->pg_kb)
 * BIT 415-510: ALU 0 (ti->alu0)
 * BIT 511-606: ALU 1 (ti->alu1)
 * BIT 607-702: ALU 2 (ti->alu2)
* BIT 703: reserved
*/
static void _bst_parse_item(struct ice_hw *hw, u16 idx, void *item,
void *data, int size)
{
struct ice_bst_tcam_item *ti = (struct ice_bst_tcam_item *)item;
u8 *buf = (u8 *)data;
int i;
ti->address = *(u16 *)buf;
for (i = 0; i < 20; i++)
ti->key[i] = buf[4 + i];
for (i = 0; i < 20; i++)
ti->key_inv[i] = buf[24 + i];
ti->hit_idx_grp = buf[44];
ti->pg_pri = buf[45] & 0x3;
_bst_npkb_init(&ti->np_kb, *(u32 *)&buf[45] >> 2);
_bst_pgkb_init(&ti->pg_kb, *(u64 *)&buf[47] >> 4);
_bst_alu_init(&ti->alu0, &buf[51]);
_bst_alu_init(&ti->alu1, &buf[63]);
_bst_alu_init(&ti->alu2, &buf[75]);
if (hw->debug_mask & ICE_DBG_PARSER)
ice_bst_tcam_dump(hw, ti);
}
/**
* ice_bst_tcam_table_get - create a boost tcam table
* @hw: pointer to the hardware structure
*/
struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw)
{
return (struct ice_bst_tcam_item *)
ice_parser_create_table(hw, ICE_SID_RXPARSER_BOOST_TCAM,
sizeof(struct ice_bst_tcam_item),
ICE_BST_TCAM_TABLE_SIZE,
ice_parser_sect_item_get,
_bst_parse_item, true);
}
static void _parse_lbl_item(struct ice_hw *hw, u16 idx, void *item,
void *data, int size)
{
ice_parse_item_dflt(hw, idx, item, data, size);
if (hw->debug_mask & ICE_DBG_PARSER)
ice_lbl_dump(hw, (struct ice_lbl_item *)item);
}
/**
* ice_bst_lbl_table_get - create a boost label table
* @hw: pointer to the hardware structure
*/
struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw)
{
return (struct ice_lbl_item *)
ice_parser_create_table(hw, ICE_SID_LBL_RXPARSER_TMEM,
sizeof(struct ice_lbl_item),
ICE_BST_TCAM_TABLE_SIZE,
ice_parser_sect_item_get,
_parse_lbl_item, true);
}
/**
* ice_bst_tcam_match - match a pattern on the boost tcam table
* @tcam_table: boost tcam table to search
* @pat: pattern to match
*/
struct ice_bst_tcam_item *
ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat)
{
int i;
for (i = 0; i < ICE_BST_TCAM_TABLE_SIZE; i++) {
struct ice_bst_tcam_item *item = &tcam_table[i];
if (item->hit_idx_grp == 0)
continue;
if (ice_ternary_match(item->key, item->key_inv, pat, 20))
return item;
}
return NULL;
}
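ice_bst_tcam_match() relies on ice_ternary_match(), which is defined elsewhere in the parser code. A hedged sketch of the key/key_inv convention assumed here (key/key_inv = 1/0 forces a 1, 0/1 forces a 0, both bits set means don't care); the driver's real helper may differ:

/* Sketch only; not the driver's actual ice_ternary_match(). */
static bool ternary_match_sketch(const u8 *key, const u8 *key_inv,
				 const u8 *pat, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		u8 care = key[i] ^ key_inv[i];	/* bits that must match */

		if ((pat[i] & care) != (key[i] & care))
			return false;
	}

	return true;
}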
static bool _start_with(const char *prefix, const char *string)
{
int len1 = strlen(prefix);
int len2 = strlen(string);
if (len2 < len1)
return false;
return !memcmp(prefix, string, len1);
}
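/**
 * ice_bst_tcam_search - find a boost tcam entry by label prefix
 * @tcam_table: boost tcam table to search
 * @lbl_table: boost label table to search
 * @prefix: label prefix to match against
 * @start: index to start searching from, updated to the matching index
 */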
struct ice_bst_tcam_item *
ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table,
struct ice_lbl_item *lbl_table,
const char *prefix, u16 *start)
{
u16 i = *start;
for (; i < ICE_BST_TCAM_TABLE_SIZE; i++) {
if (_start_with(prefix, lbl_table[i].label)) {
*start = i;
return &tcam_table[lbl_table[i].idx];
}
}
return NULL;
}
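A hedged usage sketch of the search helper, walking every boost TCAM entry whose label starts with a given prefix (the "TNL_" prefix and the caller context are illustrative, not taken from this commit):

	u16 idx = 0;
	struct ice_bst_tcam_item *item;

	while ((item = ice_bst_tcam_search(tcam_table, lbl_table,
					   "TNL_", &idx))) {
		ice_bst_tcam_dump(hw, item);
		idx++;	/* resume the scan after the current hit */
	}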
drivers/thirdparty/ice/ice_bst_tcam.h (new file, 34 lines)
@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_BST_TCAM_H_
#define _ICE_BST_TCAM_H_
#include "ice_imem.h"
struct ice_bst_tcam_item {
u16 address;
u8 key[20];
u8 key_inv[20];
u8 hit_idx_grp;
u8 pg_pri;
struct ice_np_keybuilder np_kb;
struct ice_pg_keybuilder pg_kb;
struct ice_alu alu0;
struct ice_alu alu1;
struct ice_alu alu2;
};
void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item);
struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw);
struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw);
struct ice_bst_tcam_item *
ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat);
struct ice_bst_tcam_item *
ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table,
struct ice_lbl_item *lbl_table,
const char *prefix, u16 *start);
#endif /*_ICE_BST_TCAM_H_ */
@ -1,231 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_CGU_H_
#define _ICE_CGU_H_
#include <linux/types.h>
#include "ice_cgu_regs.h"
/* CGU mux identifier
* Specifies a mux within the CGU block.
*/
enum ice_cgu_mux_sel {
/* CGU reference clock source (DWORD10_SYNCE_S_REF_CLK) */
ICE_CGU_MUX_SEL_REF_CLK,
/* CGU bypass clock source (DWORD11_SYNCE_S_BYP_CLK) */
ICE_CGU_MUX_SEL_BYPASS_CLK,
/* CGU ETHCLKO pin source (DWORD10_SYNCE_ETHCLKO_SEL) */
ICE_CGU_MUX_SEL_ETHCLKO,
/* CGU CLKO pin source (DWORD10_SYNCE_CLKO_SEL) */
ICE_CGU_MUX_SEL_CLKO,
NUM_ICE_CGU_MUX_SEL
};
/* CGU reference clock specification
* Specifies the source for the CGU reference/bypass clock.
*/
enum ice_cgu_clk_src {
/* network reference clock 0 */
ICE_CGU_CLK_SRC_NET_REF_CLK0,
/* network reference clock 1 */
ICE_CGU_CLK_SRC_NET_REF_CLK1,
/* 1588 recovered clock */
ICE_CGU_CLK_SRC_1588_RECOVERED_CLK,
/* recovered clock from phys port 0 */
ICE_CGU_CLK_SRC_SYNCE_CLK_0,
/* recovered clock from phys port 1 */
ICE_CGU_CLK_SRC_SYNCE_CLK_1,
/* recovered clock from phys port 2 */
ICE_CGU_CLK_SRC_SYNCE_CLK_2,
/* recovered clock from phys port 3 */
ICE_CGU_CLK_SRC_SYNCE_CLK_3,
/* recovered clock from phys port 4 */
ICE_CGU_CLK_SRC_SYNCE_CLK_4,
/* recovered clock from phys port 5 */
ICE_CGU_CLK_SRC_SYNCE_CLK_5,
/* recovered clock from phys port 6 */
ICE_CGU_CLK_SRC_SYNCE_CLK_6,
/* recovered clock from phys port 7 */
ICE_CGU_CLK_SRC_SYNCE_CLK_7,
NUM_ICE_CGU_CLK_SRC
};
/* Sources for ETHCLKO pin */
enum ice_cgu_ethclko_sel {
/* DPLL reference clock 0 input divided by ETHDIV */
ICE_CGU_ETHCLKO_SEL_REF_CLK_BYP0_DIV,
/* DPLL reference clock 1 input divided by ETHDIV */
ICE_CGU_ETHCLKO_SEL_REF_CLK_BYP1_DIV,
/* DPLL output clock divided by ETHDIV */
ICE_CGU_ETHCLKO_SEL_CLK_PLL_25000_DIV,
/* JAPLL output clock divided by ETHDIV */
ICE_CGU_ETHCLKO_SEL_CLK_JAPLL_625000_DIV,
/* DPLL reference clock 0 input */
ICE_CGU_ETHCLKO_SEL_REF_CLK_BYP0,
/* DPLL reference clock 1 input */
ICE_CGU_ETHCLKO_SEL_REF_CLK_BYP1,
/* DPLL output clock */
ICE_CGU_ETHCLKO_SEL_CLK_PLL_25000,
ICE_CGU_ETHCLKO_SEL_CLK_JAPLL_625000,
NUM_ICE_CGU_ETHCLKO_SEL
};
#define ICE_CGU_ETHCLKO_SEL_NRCKI ICE_CGU_ETHCLKO_SEL_REF_CLK_BYP1
/* Sources for CLKO pin */
enum ice_cgu_clko_sel {
/* DPLL reference clock 0 input divided by CLKODIV */
ICE_CGU_CLKO_SEL_REF_CLK_BYP0_DIV,
/* DPLL reference clock 1 input divided by CLKODIV */
ICE_CGU_CLKO_SEL_REF_CLK_BYP1_DIV,
/* DPLL core clock divided by CLKODIV */
ICE_CGU_CLKO_SEL_CLK_SYS_DIV,
/* JAPLL output clock divided by CLKODIV */
ICE_CGU_CLKO_SEL_CLK_JAPLL_625000_DIV,
/* DPLL reference clock 0 input */
ICE_CGU_CLKO_SEL_REF_CLK_BYP0,
/* DPLL reference clock 1 input */
ICE_CGU_CLKO_SEL_REF_CLK_BYP1,
/* 1.544 MHz, NRCP divider output */
ICE_CGU_CLKO_SEL_CLK_1544 = 8,
/* 2.048 MHz, NRCP divider output */
ICE_CGU_CLKO_SEL_CLK_2048 = 9,
NUM_ICE_CGU_CLKO_SEL
};
#define ICE_CGU_CLKO_SEL_NRCKI ICE_CGU_CLKO_SEL_REF_CLK_BYP1
/* TIME_REF source selection */
enum ice_cgu_time_ref_sel {
ICE_CGU_TIME_REF_SEL_TCXO, /* Use TCXO source */
ICE_CGU_TIME_REF_SEL_TIME_REF, /* Use TIME_REF source */
NUM_ICE_CGU_TIME_REF_SEL
};
/* Macro to convert an enum ice_time_ref_freq to a string for printing */
#define ICE_TIME_REF_FREQ_TO_STR(__trf) \
({ \
enum ice_time_ref_freq _trf = (__trf); \
(_trf) == ICE_TIME_REF_FREQ_25_000 ? "25 MHz" : \
(_trf) == ICE_TIME_REF_FREQ_122_880 ? "122.88 MHz" : \
(_trf) == ICE_TIME_REF_FREQ_125_000 ? "125 MHz" : \
(_trf) == ICE_TIME_REF_FREQ_153_600 ? "153.6 MHz" : \
(_trf) == ICE_TIME_REF_FREQ_156_250 ? "156.25 MHz" : \
(_trf) == ICE_TIME_REF_FREQ_245_760 ? "245.76 MHz" : \
"invalid"; \
})
/* Macro to convert an enum ice_cgu_time_ref_sel to a string for printing */
#define ICE_TIME_REF_SEL_TO_STR(__trs) \
({ \
enum ice_cgu_time_ref_sel _trs = (__trs); \
(_trs) == ICE_CGU_TIME_REF_SEL_TCXO ? "TCXO" : \
(_trs) == ICE_CGU_TIME_REF_SEL_TIME_REF ? "TIME_REF" : \
"invalid"; \
})
/* Macro to convert an enum ice_src_tmr_mode to a string for printing */
#define ICE_SRC_TMR_MODE_TO_STR(__mtm) \
({ \
enum ice_src_tmr_mode _mtm = (__mtm); \
(_mtm) == ICE_SRC_TMR_MODE_NANOSECONDS ? "nanoseconds" : \
(_mtm) == ICE_SRC_TMR_MODE_LOCKED ? "locked" : \
"invalid"; \
})
/* DPLL select */
enum ice_cgu_dpll_select {
/* DPLL (DPLL1) */
ICE_CGU_DPLL_SELECT_TRANSPORT,
/* EEC DPLL (DPLL2), 0x098 Hz BW */
ICE_CGU_DPLL_SELECT_EEC_RELAXED_BW,
NUM_ICE_CGU_DPLL_SELECT
};
/* DPLL holdover mode */
enum ice_cgu_dpll_holdover_mode {
/* previous acquired frequency */
ICE_CGU_DPLL_HOLDOVER_MODE_ACQUIRED_FREQ,
/* local frequency (free run) */
ICE_CGU_DPLL_HOLDOVER_MODE_LOCAL_FREQ,
NUM_ICE_CGU_DPLL_HOLDOVER_MODE
};
/* DPLL configuration parameters */
struct ice_cgu_dpll_cfg {
/* CGU reference clock frequency */
enum ice_time_ref_freq ref_freq;
/* select DPLL */
enum ice_cgu_dpll_select dpll_sel;
/* enable holdover feature support */
u32 holdover_support;
/* select holdover mode */
enum ice_cgu_dpll_holdover_mode holdover_mode;
};
enum ice_japll_ref_freq {
ICE_CGU_JAPLL_REF_FREQ_25_000, /* 25 MHz */
ICE_CGU_JAPLL_REF_FREQ_156_250, /* 156.25 MHz */
NUM_ICE_CGU_JAPLL_REF_FREQ
};
/* Mux configuration parameters */
struct ice_cgu_mux_cfg {
/* reference clock source select */
enum ice_cgu_clk_src ref_clk_src;
/* bypass clock source select */
enum ice_cgu_clk_src byp_clk_src;
/* ETHCLKO pin source select */
enum ice_cgu_ethclko_sel eth_clk_out;
/* CLKO pin source select */
enum ice_cgu_clko_sel clk_out;
/* CLKO programmable divider */
__u8 clk_out_div;
/* ETHCLKO programmable divider */
__u8 eth_clk_out_div;
/* bypass DPLL */
u32 bypass;
/* tie refClk to ground (force holdover mode) */
u32 ref_clk_gnd_ena;
};
/* CGU event was triggered by SyncE loss of lock */
#define ICE_CGU_EVENT_ERR_SYNCE_LOCK_LOSS 0x1
/* CGU event was triggered by SyncE holdover change */
#define ICE_CGU_EVENT_ERR_HOLDOVER_CHNG 0x2
/* CGU event was triggered by timestamp PLL loss of lock */
#define ICE_CGU_EVENT_ERR_TIMESYNC_LOCK_LOSS 0x4
struct ice_cgu_info {
struct ice_cgu_dpll_cfg dpll_cfg;
struct ice_cgu_mux_cfg mux_cfg;
enum ice_japll_ref_freq japll_ref_freq;
wait_queue_head_t wq_head;
/* used to synchronize waiters (only one at a time) */
struct mutex event_mutex;
u32 event_occurred;
u8 err_type;
u8 unlock_event;
/* current state of 1588 output to CGU */
u8 out_1588_enabled;
enum ice_time_ref_freq out_1588_ref_freq;
enum ice_time_ref_freq time_ref_freq;
enum ice_src_tmr_mode src_tmr_mode;
};
#endif /* _ICE_CGU_H_ */

drivers/thirdparty/ice/ice_cgu_ops.c vendored
@@ -1,248 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice.h"
/**
* ice_cgu_cfg_ts_pll - Configure the TS PLL
* @pf: Board private structure
* @enable: True to enable TS PLL
* @time_ref_freq: primary timer frequency
* @time_ref_sel: Time source
* @src_tmr_mode: primary timer mode
*/
int ice_cgu_cfg_ts_pll(struct ice_pf *pf, bool enable, enum ice_time_ref_freq time_ref_freq,
enum ice_cgu_time_ref_sel time_ref_sel, enum ice_src_tmr_mode src_tmr_mode)
{
struct ice_cgu_info *cgu_info = &pf->cgu_info;
union tspll_ro_bwm_lf bwm_lf;
union nac_cgu_dword19 dw19;
union nac_cgu_dword22 dw22;
union nac_cgu_dword24 dw24;
union nac_cgu_dword9 dw9;
int err;
dev_dbg(ice_pf_to_dev(pf), "Requested %s, time_ref_freq %s, time_ref_sel %s, src_tmr_mode %s\n",
enable ? "enable" : "disable",
ICE_TIME_REF_FREQ_TO_STR(time_ref_freq),
ICE_TIME_REF_SEL_TO_STR(time_ref_sel),
ICE_SRC_TMR_MODE_TO_STR(src_tmr_mode));
if (time_ref_freq >= NUM_ICE_TIME_REF_FREQ) {
dev_err(ice_pf_to_dev(pf), "Invalid TIME_REF freq %u\n", time_ref_freq);
return -EIO;
}
if (time_ref_sel >= NUM_ICE_CGU_TIME_REF_SEL) {
dev_err(ice_pf_to_dev(pf), "Invalid TIME_REF sel %u\n", time_ref_sel);
return -EIO;
}
if (src_tmr_mode >= NUM_ICE_SRC_TMR_MODE) {
dev_err(ice_pf_to_dev(pf), "Invalid src_tmr_mode %u\n", src_tmr_mode);
return -EIO;
}
if (time_ref_sel == ICE_CGU_TIME_REF_SEL_TCXO &&
time_ref_freq != ICE_TIME_REF_FREQ_25_000) {
dev_err(ice_pf_to_dev(pf),
"TS PLL source specified as TCXO but specified frequency is not 25 MHz\n");
return -EIO;
}
err = ice_cgu_reg_read(pf, NAC_CGU_DWORD9, &dw9.val);
if (!err)
err = ice_cgu_reg_read(pf, NAC_CGU_DWORD24, &dw24.val);
if (!err)
err = ice_cgu_reg_read(pf, TSPLL_RO_BWM_LF, &bwm_lf.val);
if (err)
return err;
dev_dbg(ice_pf_to_dev(pf),
"Before change, %s, time_ref_freq %s, time_ref_sel %s, PLL %s\n",
dw24.field.ts_pll_enable ? "enabled" : "disabled",
ICE_TIME_REF_FREQ_TO_STR(dw9.field.time_ref_freq_sel),
ICE_TIME_REF_SEL_TO_STR(dw24.field.time_ref_sel),
bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
if (!enable) {
if (dw24.field.ts_pll_enable) {
dw24.field.ts_pll_enable = 0;
err = ice_cgu_reg_write(pf, NAC_CGU_DWORD24, dw24.val);
if (!err)
ice_cgu_usleep(1);
}
/* don't need to update the freq, sel, or mode; that'll happen
* when the PLL is re-enabled
*/
return err;
}
/* TS PLL must be disabled before changing freq or src */
if (dw24.field.ts_pll_enable && (dw9.field.time_ref_freq_sel != time_ref_freq ||
dw24.field.time_ref_sel != time_ref_sel)) {
dev_err(ice_pf_to_dev(pf),
"Can't adjust time_ref_freq or time_ref_sel while TS PLL is enabled\n");
return -EIO;
}
/* set frequency, configure TS PLL params, and enable the TS PLL */
err = ice_cgu_reg_read(pf, NAC_CGU_DWORD19, &dw19.val);
if (!err)
err = ice_cgu_reg_read(pf, NAC_CGU_DWORD22, &dw22.val);
if (!err) {
dw9.field.time_ref_freq_sel = time_ref_freq;
dw19.field.tspll_fbdiv_intgr = tspll_per_rate_params[time_ref_freq].feedback_div;
dw19.field.tspll_ndivratio = 1;
dw22.field.time1588clk_div = tspll_per_rate_params[time_ref_freq].post_pll_div;
dw22.field.time1588clk_sel_div2 = 0;
dw24.field.ref1588_ck_div = tspll_per_rate_params[time_ref_freq].refclk_pre_div;
dw24.field.tspll_fbdiv_frac = tspll_per_rate_params[time_ref_freq].frac_n_div;
dw24.field.time_ref_sel = time_ref_sel;
err = ice_cgu_reg_write(pf, NAC_CGU_DWORD9, dw9.val);
}
if (!err)
err = ice_cgu_reg_write(pf, NAC_CGU_DWORD19, dw19.val);
if (!err)
err = ice_cgu_reg_write(pf, NAC_CGU_DWORD22, dw22.val);
/* first write dw24 with updated values but still not enabled */
if (!err)
err = ice_cgu_reg_write(pf, NAC_CGU_DWORD24, dw24.val);
/* now enable the TS_PLL */
if (!err) {
dw24.field.ts_pll_enable = 1;
err = ice_cgu_reg_write(pf, NAC_CGU_DWORD24, dw24.val);
}
if (!err) {
cgu_info->time_ref_freq = time_ref_freq;
cgu_info->src_tmr_mode = src_tmr_mode;
err = ice_ptp_update_incval(pf, time_ref_freq, src_tmr_mode);
if (err) {
dev_err(ice_pf_to_dev(pf), "Failed to update INCVAL\n");
return err;
}
}
/* to check for lock, wait 1 ms; if it hasn't locked by then, it's not
* going to lock
*/
if (!err) {
ice_cgu_usleep(1000);
err = ice_cgu_reg_read(pf, TSPLL_RO_BWM_LF, &bwm_lf.val);
}
if (!err && bwm_lf.field.plllock_true_lock_cri) {
dev_dbg(ice_pf_to_dev(pf),
"TS PLL successfully locked, time_ref_freq %s, time_ref_sel %s\n",
ICE_TIME_REF_FREQ_TO_STR(time_ref_freq),
ICE_TIME_REF_SEL_TO_STR(time_ref_sel));
/* update state to indicate no unlock event since last lock */
cgu_info->unlock_event = false;
} else {
dev_err(ice_pf_to_dev(pf), "TS PLL failed to lock\n");
err = -EFAULT;
}
return err;
}
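/* A hedged usage sketch (function and values illustrative): retuning the
 * TS PLL must honor the disable-then-enable rule enforced above, since the
 * frequency and source may only change while the PLL is off;
 * ice_cgu_init_state below applies the same sequence with the soft-strap
 * parameters.
 */
static int example_retune_ts_pll(struct ice_pf *pf)
{
	int err;

	err = ice_cgu_cfg_ts_pll(pf, false, ICE_TIME_REF_FREQ_156_250,
				 ICE_CGU_TIME_REF_SEL_TIME_REF,
				 ICE_SRC_TMR_MODE_NANOSECONDS);
	if (err)
		return err;
	return ice_cgu_cfg_ts_pll(pf, true, ICE_TIME_REF_FREQ_156_250,
				  ICE_CGU_TIME_REF_SEL_TIME_REF,
				  ICE_SRC_TMR_MODE_NANOSECONDS);
}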
/**
* ice_cgu_init_state - Initialize CGU HW
* @pf: Board private structure
*
* Read CGU registers, initialize internal state, and lock the timestamp PLL using the parameters
* read from the soft straps.
*/
void ice_cgu_init_state(struct ice_pf *pf)
{
union tspll_cntr_bist_settings tspll_cntr_bist;
struct ice_cgu_info *cgu_info = &pf->cgu_info;
union nac_cgu_dword10 dw10;
union nac_cgu_dword11 dw11;
union nac_cgu_dword12 dw12;
union nac_cgu_dword14 dw14;
union nac_cgu_dword24 dw24;
union nac_cgu_dword9 dw9;
int err;
init_waitqueue_head(&cgu_info->wq_head);
mutex_init(&cgu_info->event_mutex);
err = ice_cgu_reg_read(pf, NAC_CGU_DWORD9, &dw9.val);
if (!err)
err = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val);
if (!err)
err = ice_cgu_reg_read(pf, NAC_CGU_DWORD11, &dw11.val);
if (!err)
err = ice_cgu_reg_read(pf, NAC_CGU_DWORD12, &dw12.val);
if (!err)
err = ice_cgu_reg_read(pf, NAC_CGU_DWORD14, &dw14.val);
if (!err)
err = ice_cgu_reg_read(pf, NAC_CGU_DWORD24, &dw24.val);
if (!err)
err = ice_cgu_reg_read(pf, TSPLL_CNTR_BIST_SETTINGS, &tspll_cntr_bist.val);
if (err)
goto err;
/* Note that the TIME_SYNC, TIME_REF, and ONE_PPS_OUT pins are enabled
* through soft straps.
*/
/* Mux config */
cgu_info->mux_cfg.ref_clk_src = dw10.field.synce_s_ref_clk;
cgu_info->mux_cfg.byp_clk_src = dw11.field.synce_s_byp_clk;
cgu_info->mux_cfg.eth_clk_out = dw10.field.synce_ethclko_sel;
cgu_info->mux_cfg.clk_out = dw10.field.synce_clko_sel;
cgu_info->mux_cfg.clk_out_div = dw10.field.synce_clkodiv_m1;
cgu_info->mux_cfg.eth_clk_out_div = dw10.field.synce_ethdiv_m1;
cgu_info->mux_cfg.bypass = dw12.field.synce_dpll_byp;
cgu_info->mux_cfg.ref_clk_gnd_ena = dw10.field.synce_sel_gnd;
/* Timestamp PLL config */
/* Disable sticky lock detection so lock status reported is accurate */
tspll_cntr_bist.field.i_plllock_sel_0 = 0;
tspll_cntr_bist.field.i_plllock_sel_1 = 0;
err = ice_cgu_reg_write(pf, TSPLL_CNTR_BIST_SETTINGS, tspll_cntr_bist.val);
/* Assume the 1588 output to CGU isn't configured; require the app to reconfigure it before
* using it
*/
if (!err)
cgu_info->out_1588_enabled = false;
/* first, try to lock the timestamp PLL with the parameters from the soft straps */
/* disable first, then re-enable with correct parameters */
err = ice_cgu_cfg_ts_pll(pf, false, dw9.field.time_ref_freq_sel, dw24.field.time_ref_sel,
ICE_SRC_TMR_MODE_NANOSECONDS);
if (err)
dev_err(ice_pf_to_dev(pf), "Failed to disable TS PLL\n");
else
err = ice_cgu_cfg_ts_pll(pf, true, dw9.field.time_ref_freq_sel,
dw24.field.time_ref_sel, ICE_SRC_TMR_MODE_NANOSECONDS);
if (err) {
/* if that fails, try to lock the timestamp PLL with the TCXO
*/
dev_info(ice_pf_to_dev(pf),
"Unable to lock TS PLL with soft straps settings; trying TCXO\n");
/* disable first, then re-enable with correct parameters */
err = ice_cgu_cfg_ts_pll(pf, false, ICE_TIME_REF_FREQ_25_000,
ICE_CGU_TIME_REF_SEL_TCXO,
ICE_SRC_TMR_MODE_NANOSECONDS);
if (err)
dev_err(ice_pf_to_dev(pf), "Failed to disable TS PLL with TCXO\n");
else
err = ice_cgu_cfg_ts_pll(pf, true, ICE_TIME_REF_FREQ_25_000,
ICE_CGU_TIME_REF_SEL_TCXO,
ICE_SRC_TMR_MODE_NANOSECONDS);
if (err) {
dev_err(ice_pf_to_dev(pf), "Failed to lock TS PLL with TCXO\n");
goto err;
}
}
dev_info(ice_pf_to_dev(pf), "CGU init successful\n");
return;
err:
dev_err(ice_pf_to_dev(pf), "CGU init failed, err=%d\n", err);
}

drivers/thirdparty/ice/ice_cgu_ops.h vendored
@@ -1,121 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_CGU_OPS_H_
#define _ICE_CGU_OPS_H_
#define ICE_CGU_LOCK_CHECK_DELAY_USEC 256000 /* 256 msec */
/* fast mode lock check settings */
#define ICE_CGU_EDPLL_FAST_LOCK_DELAY_LOOPS 239 /* 60 seconds total */
#define ICE_CGU_TDPLL_FAST_LOCK_DELAY_LOOPS 25 /* 5 seconds total */
/* normal mode lock check settings */
#define ICE_CGU_EDPLL_NORMAL_LOCK_DELAY_LOOPS 52 /* 12 seconds total */
#define ICE_CGU_TDPLL_NORMAL_LOCK_DELAY_LOOPS 13 /* 2 seconds total */
/* number of consecutive locks to declare DPLL lock */
#define ICE_CGU_DPLL_LOCK_COUNT 5
#define ICE_CGU_CORE_CLOCK_MHZ 800
#define ICE_CGU_DPLL_FREQ_MHZ 25
/* DPLL lock/unlock threshold */
#define ICE_CGU_TRANSPORT_DPLL_LOCK_THRESHOLD_800MHZ 0x2D8
#define ICE_CGU_TRANSPORT_DPLL_UNLOCK_THRESHOLD_800MHZ 0x3640
#define ICE_CGU_ECC_DPLL_LOCK_THRESHOLD_800MHZ 0x5A
#define ICE_CGU_ECC_DPLL_UNLOCK_THRESHOLD_800MHZ 0x21E8
/* time to hold enable bits low to perform a JAPLL reset */
#define ICE_CGU_JAPLL_RESET_TIME_USEC 1
/* LCPLL lock alone (FDPLL disabled) should take < 10 usec */
#define ICE_CGU_LCPLL_LOCK_CHECK_DELAY_USEC 1
#define ICE_CGU_LCPLL_LOCK_DELAY_LOOPS 10
/* FDPLL lock time in fast mode is around 500 msec;
* use poll interval of 100ms, max poll time 5 seconds
* (max poll time was originally 2 seconds, increased
* to 5 to avoid occasional poll timeouts.)
*/
#define ICE_CGU_FDPLL_LOCK_CHECK_DELAY_USEC 100000
#define ICE_CGU_FDPLL_LOCK_DELAY_LOOPS 50
#define ICE_CGU_FDPLL_ACQ_TOGGLE_LOOPS 2
/* valid values for enum ice_cgu_clko_sel */
#define ICE_CGU_CLKO_SEL_VALID_BITMAP \
(BIT(ICE_CGU_CLKO_SEL_REF_CLK_BYP0_DIV) | \
BIT(ICE_CGU_CLKO_SEL_REF_CLK_BYP1_DIV) | \
BIT(ICE_CGU_CLKO_SEL_CLK_SYS_DIV) | \
BIT(ICE_CGU_CLKO_SEL_CLK_JAPLL_625000_DIV) | \
BIT(ICE_CGU_CLKO_SEL_REF_CLK_BYP0) | \
BIT(ICE_CGU_CLKO_SEL_REF_CLK_BYP1) | \
BIT(ICE_CGU_CLKO_SEL_CLK_1544) | \
BIT(ICE_CGU_CLKO_SEL_CLK_2048))
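/* A sketch of how the validity bitmap is typically consumed (the helper is
 * illustrative, not part of the driver); the enum gap at values 6-7 before
 * ICE_CGU_CLKO_SEL_CLK_1544 = 8 is exactly what the bitmap encodes:
 */
static inline bool example_clko_sel_valid(enum ice_cgu_clko_sel sel)
{
	return sel < NUM_ICE_CGU_CLKO_SEL &&
	       (ICE_CGU_CLKO_SEL_VALID_BITMAP & BIT(sel));
}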
/* Only FW can read NAC_CGU_DWORD8 where these are defined, so they are exposed
* to the driver stack via soft straps in the misc24 field of NAC_CGU_DWORD9.
*/
#define MISC24_BIT_TCXO_FREQ_SEL_M BIT(0)
#define MISC24_BIT_TCXO_SEL_M BIT(4)
/* internal structure definitions */
enum ice_cgu_sample_rate {
ICE_CGU_SAMPLE_RATE_8K = 0, /* 8 KHz sample rate */
ICE_CGU_SAMPLE_RATE_10K, /* 10 KHz sample rate */
ICE_CGU_SAMPLE_RATE_12K5, /* 12.5 KHz sample rate */
NUM_ICE_CGU_SAMPLE_RATE
};
struct ice_cgu_div_rat_m1 {
u32 ref_clk_rate; /* reference clock rate in kHz */
u32 div_rat_m1; /* div_rat_m1 value */
};
struct ice_cgu_dpll_params {
enum ice_cgu_dpll_select dpll_select;
enum ice_cgu_sample_rate sample_rate;
u32 mul_rat_m1;
u32 scale;
u32 gain;
};
struct ice_cgu_dpll_per_rate_params {
u32 rate_hz;
enum ice_cgu_sample_rate sample_rate;
u32 div_rat_m1;
u32 synce_rat_sel;
};
struct ice_cgu_lcpll_per_rate_params {
u32 refclk_pre_div;
u32 feedback_div;
u32 frac_n_div;
u32 post_pll_div;
};
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/* Function to init internal state */
void ice_cgu_init_state(struct ice_pf *pf);
/* Function to configure TS PLL */
int
ice_cgu_cfg_ts_pll(struct ice_pf *pf, bool enable, enum ice_time_ref_freq time_ref_freq,
enum ice_cgu_time_ref_sel time_ref_sel,
enum ice_src_tmr_mode src_tmr_mode);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline void ice_cgu_init_state(struct ice_pf *pf) { }
#if IS_ENABLED(CONFIG_DEBUG_FS)
static inline int
ice_cgu_cfg_ts_pll(struct ice_pf __always_unused *pf, bool __always_unused enable,
enum ice_time_ref_freq __always_unused time_ref_freq,
enum ice_cgu_time_ref_sel __always_unused time_ref_sel,
enum ice_src_tmr_mode __always_unused src_tmr_mode)
{
return 0;
}
#endif /* IS_ENABLED(CONFIG_DEBUG_FS) */
#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
#endif /* _ICE_CGU_OPS_H_ */

drivers/thirdparty/ice/ice_cgu_regs.h vendored
@@ -4,101 +4,7 @@
#ifndef _ICE_CGU_REGS_H_
#define _ICE_CGU_REGS_H_
#include "ice_osdep.h"
#define NAC_CGU_DWORD8 0x20
#define NAC_CGU_DWORD8_TCXO_FREQ_SEL_S 0
#define NAC_CGU_DWORD8_TCXO_FREQ_SEL_M BIT(0)
#define NAC_CGU_DWORD8_MISC8_S 1
#define NAC_CGU_DWORD8_MISC8_M ICE_M(0x7, 1)
#define NAC_CGU_DWORD8_HLP_SWITCH_FREQ_SEL_S 4
#define NAC_CGU_DWORD8_HLP_SWITCH_FREQ_SEL_M ICE_M(0xf, 4)
#define NAC_CGU_DWORD8_CGUPLL_NDIVRATIO_S 8
#define NAC_CGU_DWORD8_CGUPLL_NDIVRATIO_M ICE_M(0xf, 8)
#define NAC_CGU_DWORD8_CGUPLL_IREF_NDIVRATIO_S 12
#define NAC_CGU_DWORD8_CGUPLL_IREF_NDIVRATIO_M ICE_M(0x7, 12)
#define NAC_CGU_DWORD8_MISC28_S 15
#define NAC_CGU_DWORD8_MISC28_M BIT(15)
#define NAC_CGU_DWORD8_HLPPLL_NDIVRATIO_S 16
#define NAC_CGU_DWORD8_HLPPLL_NDIVRATIO_M ICE_M(0xf, 16)
#define NAC_CGU_DWORD8_HLPPLL_IREF_NDIVRATIO_S 20
#define NAC_CGU_DWORD8_HLPPLL_IREF_NDIVRATIO_M ICE_M(0x7, 20)
#define NAC_CGU_DWORD8_MISC29_S 23
#define NAC_CGU_DWORD8_MISC29_M BIT(23)
#define NAC_CGU_DWORD8_CLK_EREF1_EN_SELFBIAS_S 24
#define NAC_CGU_DWORD8_CLK_EREF1_EN_SELFBIAS_M BIT(24)
#define NAC_CGU_DWORD8_CLK_EREF0_EN_SELFBIAS_S 25
#define NAC_CGU_DWORD8_CLK_EREF0_EN_SELFBIAS_M BIT(25)
#define NAC_CGU_DWORD8_TIME_REF_EN_SELFBIAS_S 26
#define NAC_CGU_DWORD8_TIME_REF_EN_SELFBIAS_M BIT(26)
#define NAC_CGU_DWORD8_TIME_SYNC_EN_SELFBIAS_S 27
#define NAC_CGU_DWORD8_TIME_SYNC_EN_SELFBIAS_M BIT(27)
#define NAC_CGU_DWORD8_CLK_REF_SYNC_E_EN_SELFBIAS_S 28
#define NAC_CGU_DWORD8_CLK_REF_SYNC_E_EN_SELFBIAS_M BIT(28)
#define NAC_CGU_DWORD8_NET_CLK_REF1_EN_SELFBIAS_S 29
#define NAC_CGU_DWORD8_NET_CLK_REF1_EN_SELFBIAS_M BIT(29)
#define NAC_CGU_DWORD8_NET_CLK_REF0_EN_SELFBIAS_S 30
#define NAC_CGU_DWORD8_NET_CLK_REF0_EN_SELFBIAS_M BIT(30)
#define NAC_CGU_DWORD8_TCXO_SEL_S 31
#define NAC_CGU_DWORD8_TCXO_SEL_M BIT(31)
union nac_cgu_dword8 {
struct {
u32 tcxo_freq_sel : 1;
u32 misc8 : 3;
u32 hlp_switch_freq_sel : 4;
u32 cgupll_ndivratio : 4;
u32 cgupll_iref_ndivratio : 3;
u32 misc28 : 1;
u32 hlppll_ndivratio : 4;
u32 hlppll_iref_ndivratio : 3;
u32 misc29 : 1;
u32 clk_eref1_en_selfbias : 1;
u32 clk_eref0_en_selfbias : 1;
u32 time_ref_en_selfbias : 1;
u32 time_sync_en_selfbias : 1;
u32 clk_ref_sync_e_en_selfbias : 1;
u32 net_clk_ref1_en_selfbias : 1;
u32 net_clk_ref0_en_selfbias : 1;
u32 tcxo_sel : 1;
} field;
u32 val;
};
#define NAC_CGU_DWORD9 0x24
#define NAC_CGU_DWORD9_TIME_REF_FREQ_SEL_S 0
#define NAC_CGU_DWORD9_TIME_REF_FREQ_SEL_M ICE_M(0x7, 0)
#define NAC_CGU_DWORD9_CLK_EREF1_EN_S 3
#define NAC_CGU_DWORD9_CLK_EREF1_EN_M BIT(3)
#define NAC_CGU_DWORD9_CLK_EREF0_EN_S 4
#define NAC_CGU_DWORD9_CLK_EREF0_EN_M BIT(4)
#define NAC_CGU_DWORD9_TIME_REF_EN_S 5
#define NAC_CGU_DWORD9_TIME_REF_EN_M BIT(5)
#define NAC_CGU_DWORD9_TIME_SYNC_EN_S 6
#define NAC_CGU_DWORD9_TIME_SYNC_EN_M BIT(6)
#define NAC_CGU_DWORD9_ONE_PPS_OUT_EN_S 7
#define NAC_CGU_DWORD9_ONE_PPS_OUT_EN_M BIT(7)
#define NAC_CGU_DWORD9_CLK_REF_SYNCE_EN_S 8
#define NAC_CGU_DWORD9_CLK_REF_SYNCE_EN_M BIT(8)
#define NAC_CGU_DWORD9_CLK_SYNCE1_EN_S 9
#define NAC_CGU_DWORD9_CLK_SYNCE1_EN_M BIT(9)
#define NAC_CGU_DWORD9_CLK_SYNCE0_EN_S 10
#define NAC_CGU_DWORD9_CLK_SYNCE0_EN_M BIT(10)
#define NAC_CGU_DWORD9_NET_CLK_REF1_EN_S 11
#define NAC_CGU_DWORD9_NET_CLK_REF1_EN_M BIT(11)
#define NAC_CGU_DWORD9_NET_CLK_REF0_EN_S 12
#define NAC_CGU_DWORD9_NET_CLK_REF0_EN_M BIT(12)
#define NAC_CGU_DWORD9_CLK_SYNCE1_AMP_S 13
#define NAC_CGU_DWORD9_CLK_SYNCE1_AMP_M ICE_M(0x3, 13)
#define NAC_CGU_DWORD9_MISC6_S 15
#define NAC_CGU_DWORD9_MISC6_M BIT(15)
#define NAC_CGU_DWORD9_CLK_SYNCE0_AMP_S 16
#define NAC_CGU_DWORD9_CLK_SYNCE0_AMP_M ICE_M(0x3, 16)
#define NAC_CGU_DWORD9_ONE_PPS_OUT_AMP_S 18
#define NAC_CGU_DWORD9_ONE_PPS_OUT_AMP_M ICE_M(0x3, 18)
#define NAC_CGU_DWORD9_MISC24_S 20
#define NAC_CGU_DWORD9_MISC24_M ICE_M(0xfff, 20)
union nac_cgu_dword9 {
struct {
u32 time_ref_freq_sel : 3;
@@ -121,311 +27,7 @@ union nac_cgu_dword9 {
u32 val;
};
#define NAC_CGU_DWORD10 0x28
#define NAC_CGU_DWORD10_JA_PLL_ENABLE_S 0
#define NAC_CGU_DWORD10_JA_PLL_ENABLE_M BIT(0)
#define NAC_CGU_DWORD10_MISC11_S 1
#define NAC_CGU_DWORD10_MISC11_M BIT(1)
#define NAC_CGU_DWORD10_FDPLL_ENABLE_S 2
#define NAC_CGU_DWORD10_FDPLL_ENABLE_M BIT(2)
#define NAC_CGU_DWORD10_FDPLL_SLOW_S 3
#define NAC_CGU_DWORD10_FDPLL_SLOW_M BIT(3)
#define NAC_CGU_DWORD10_FDPLL_LOCK_INT_ENB_S 4
#define NAC_CGU_DWORD10_FDPLL_LOCK_INT_ENB_M BIT(4)
#define NAC_CGU_DWORD10_SYNCE_CLKO_SEL_S 5
#define NAC_CGU_DWORD10_SYNCE_CLKO_SEL_M ICE_M(0xf, 5)
#define NAC_CGU_DWORD10_SYNCE_CLKODIV_M1_S 9
#define NAC_CGU_DWORD10_SYNCE_CLKODIV_M1_M ICE_M(0x1f, 9)
#define NAC_CGU_DWORD10_SYNCE_CLKODIV_LOAD_S 14
#define NAC_CGU_DWORD10_SYNCE_CLKODIV_LOAD_M BIT(14)
#define NAC_CGU_DWORD10_SYNCE_DCK_RST_S 15
#define NAC_CGU_DWORD10_SYNCE_DCK_RST_M BIT(15)
#define NAC_CGU_DWORD10_SYNCE_ETHCLKO_SEL_S 16
#define NAC_CGU_DWORD10_SYNCE_ETHCLKO_SEL_M ICE_M(0x7, 16)
#define NAC_CGU_DWORD10_SYNCE_ETHDIV_M1_S 19
#define NAC_CGU_DWORD10_SYNCE_ETHDIV_M1_M ICE_M(0x1f, 19)
#define NAC_CGU_DWORD10_SYNCE_ETHDIV_LOAD_S 24
#define NAC_CGU_DWORD10_SYNCE_ETHDIV_LOAD_M BIT(24)
#define NAC_CGU_DWORD10_SYNCE_DCK2_RST_S 25
#define NAC_CGU_DWORD10_SYNCE_DCK2_RST_M BIT(25)
#define NAC_CGU_DWORD10_SYNCE_SEL_GND_S 26
#define NAC_CGU_DWORD10_SYNCE_SEL_GND_M BIT(26)
#define NAC_CGU_DWORD10_SYNCE_S_REF_CLK_S 27
#define NAC_CGU_DWORD10_SYNCE_S_REF_CLK_M ICE_M(0x1f, 27)
union nac_cgu_dword10 {
struct {
u32 ja_pll_enable : 1;
u32 misc11 : 1;
u32 fdpll_enable : 1;
u32 fdpll_slow : 1;
u32 fdpll_lock_int_enb : 1;
u32 synce_clko_sel : 4;
u32 synce_clkodiv_m1 : 5;
u32 synce_clkodiv_load : 1;
u32 synce_dck_rst : 1;
u32 synce_ethclko_sel : 3;
u32 synce_ethdiv_m1 : 5;
u32 synce_ethdiv_load : 1;
u32 synce_dck2_rst : 1;
u32 synce_sel_gnd : 1;
u32 synce_s_ref_clk : 5;
} field;
u32 val;
};
#define NAC_CGU_DWORD11 0x2c
#define NAC_CGU_DWORD11_MISC25_S 0
#define NAC_CGU_DWORD11_MISC25_M BIT(0)
#define NAC_CGU_DWORD11_SYNCE_S_BYP_CLK_S 1
#define NAC_CGU_DWORD11_SYNCE_S_BYP_CLK_M ICE_M(0x3f, 1)
#define NAC_CGU_DWORD11_SYNCE_HDOV_MODE_S 7
#define NAC_CGU_DWORD11_SYNCE_HDOV_MODE_M BIT(7)
#define NAC_CGU_DWORD11_SYNCE_RAT_SEL_S 8
#define NAC_CGU_DWORD11_SYNCE_RAT_SEL_M ICE_M(0x3, 8)
#define NAC_CGU_DWORD11_SYNCE_LINK_ENABLE_S 10
#define NAC_CGU_DWORD11_SYNCE_LINK_ENABLE_M ICE_M(0xfffff, 10)
#define NAC_CGU_DWORD11_SYNCE_MISCLK_EN_S 30
#define NAC_CGU_DWORD11_SYNCE_MISCLK_EN_M BIT(30)
#define NAC_CGU_DWORD11_SYNCE_MISCLK_RAT_M1_S 31
#define NAC_CGU_DWORD11_SYNCE_MISCLK_RAT_M1_M BIT(31)
union nac_cgu_dword11 {
struct {
u32 misc25 : 1;
u32 synce_s_byp_clk : 6;
u32 synce_hdov_mode : 1;
u32 synce_rat_sel : 2;
u32 synce_link_enable : 20;
u32 synce_misclk_en : 1;
u32 synce_misclk_rat_m1 : 1;
} field;
u32 val;
};
#define NAC_CGU_DWORD12 0x30
#define NAC_CGU_DWORD12_SYNCE_MISCLK_RAT_M1_S 0
#define NAC_CGU_DWORD12_SYNCE_MISCLK_RAT_M1_M ICE_M(0x3ff, 0)
#define NAC_CGU_DWORD12_SYNCE_MCK_RST_S 10
#define NAC_CGU_DWORD12_SYNCE_MCK_RST_M BIT(10)
#define NAC_CGU_DWORD12_SYNCE_DPLL_BYP_S 11
#define NAC_CGU_DWORD12_SYNCE_DPLL_BYP_M BIT(11)
#define NAC_CGU_DWORD12_SYNCE_DV_RAT_M1_S 12
#define NAC_CGU_DWORD12_SYNCE_DV_RAT_M1_M ICE_M(0x1fff, 12)
#define NAC_CGU_DWORD12_SYNCE_ML_RAT_M1_S 25
#define NAC_CGU_DWORD12_SYNCE_ML_RAT_M1_M ICE_M(0x7f, 25)
union nac_cgu_dword12 {
struct {
u32 synce_misclk_rat_m1 : 10;
u32 synce_mck_rst : 1;
u32 synce_dpll_byp : 1;
u32 synce_dv_rat_m1 : 13;
u32 synce_ml_rat_m1 : 7;
} field;
u32 val;
};
#define NAC_CGU_DWORD13 0x34
#define NAC_CGU_DWORD13_SYNCE_ML_RAT_M1_S 0
#define NAC_CGU_DWORD13_SYNCE_ML_RAT_M1_M ICE_M(0x1f, 0)
#define NAC_CGU_DWORD13_SYNCE_HDOV_CHANGED_S 5
#define NAC_CGU_DWORD13_SYNCE_HDOV_CHANGED_M BIT(5)
#define NAC_CGU_DWORD13_SYNCE_LOCK_CHANGED_S 6
#define NAC_CGU_DWORD13_SYNCE_LOCK_CHANGED_M BIT(6)
#define NAC_CGU_DWORD13_SYNCE_HDOV_S 7
#define NAC_CGU_DWORD13_SYNCE_HDOV_M BIT(7)
#define NAC_CGU_DWORD13_SYNCE_HDOV_INT_ENB_S 8
#define NAC_CGU_DWORD13_SYNCE_HDOV_INT_ENB_M BIT(8)
#define NAC_CGU_DWORD13_SYNCE_LOCK_INT_ENB_S 9
#define NAC_CGU_DWORD13_SYNCE_LOCK_INT_ENB_M BIT(9)
#define NAC_CGU_DWORD13_SYNCE_LOCKED_NC_S 10
#define NAC_CGU_DWORD13_SYNCE_LOCKED_NC_M BIT(10)
#define NAC_CGU_DWORD13_FDPLL_LOCKED_NC_S 11
#define NAC_CGU_DWORD13_FDPLL_LOCKED_NC_M BIT(11)
#define NAC_CGU_DWORD13_SYNCE_LOCKED_CLEAR_S 12
#define NAC_CGU_DWORD13_SYNCE_LOCKED_CLEAR_M BIT(12)
#define NAC_CGU_DWORD13_SYNCE_HDOV_CLEAR_S 13
#define NAC_CGU_DWORD13_SYNCE_HDOV_CLEAR_M BIT(13)
#define NAC_CGU_DWORD13_FDPLL_LOCKED_CLEAR_S 14
#define NAC_CGU_DWORD13_FDPLL_LOCKED_CLEAR_M BIT(14)
#define NAC_CGU_DWORD13_FDPLL_LOCK_CHANGED_S 15
#define NAC_CGU_DWORD13_FDPLL_LOCK_CHANGED_M BIT(15)
#define NAC_CGU_DWORD13_RMNRXCLK_SEL_S 16
#define NAC_CGU_DWORD13_RMNRXCLK_SEL_M ICE_M(0x1f, 16)
#define NAC_CGU_DWORD13_ENABLE_ETH_COUNT_S 21
#define NAC_CGU_DWORD13_ENABLE_ETH_COUNT_M BIT(21)
#define NAC_CGU_DWORD13_ETH_COUNT_FAST_MODE_S 22
#define NAC_CGU_DWORD13_ETH_COUNT_FAST_MODE_M BIT(22)
#define NAC_CGU_DWORD13_MISC12_S 23
#define NAC_CGU_DWORD13_MISC12_M ICE_M(0x1ff, 23)
union nac_cgu_dword13 {
struct {
u32 synce_ml_rat_m1 : 5;
u32 synce_hdov_changed : 1;
u32 synce_lock_changed : 1;
u32 synce_hdov : 1;
u32 synce_hdov_int_enb : 1;
u32 synce_lock_int_enb : 1;
u32 synce_locked_nc : 1;
u32 fdpll_locked_nc : 1;
u32 synce_locked_clear : 1;
u32 synce_hdov_clear : 1;
u32 fdpll_locked_clear : 1;
u32 fdpll_lock_changed : 1;
u32 rmnrxclk_sel : 5;
u32 enable_eth_count : 1;
u32 eth_count_fast_mode : 1;
u32 misc12 : 9;
} field;
u32 val;
};
#define NAC_CGU_DWORD14 0x38
#define NAC_CGU_DWORD14_SYNCE_LNK_UP_MD_S 0
#define NAC_CGU_DWORD14_SYNCE_LNK_UP_MD_M BIT(0)
#define NAC_CGU_DWORD14_SYNCE_LNK_DN_MD_S 1
#define NAC_CGU_DWORD14_SYNCE_LNK_DN_MD_M BIT(1)
#define NAC_CGU_DWORD14_SYNCE_FAST_MODE_S 2
#define NAC_CGU_DWORD14_SYNCE_FAST_MODE_M BIT(2)
#define NAC_CGU_DWORD14_SYNCE_EEC_MODE_S 3
#define NAC_CGU_DWORD14_SYNCE_EEC_MODE_M BIT(3)
#define NAC_CGU_DWORD14_SYNCE_NGAIN_S 4
#define NAC_CGU_DWORD14_SYNCE_NGAIN_M ICE_M(0xff, 4)
#define NAC_CGU_DWORD14_SYNCE_NSCALE_S 12
#define NAC_CGU_DWORD14_SYNCE_NSCALE_M ICE_M(0x3f, 12)
#define NAC_CGU_DWORD14_SYNCE_UNLCK_THR_S 18
#define NAC_CGU_DWORD14_SYNCE_UNLCK_THR_M ICE_M(0x3fff, 18)
union nac_cgu_dword14 {
struct {
u32 synce_lnk_up_md : 1;
u32 synce_lnk_dn_md : 1;
u32 synce_fast_mode : 1;
u32 synce_eec_mode : 1;
u32 synce_ngain : 8;
u32 synce_nscale : 6;
u32 synce_unlck_thr : 14;
} field;
u32 val;
};
#define NAC_CGU_DWORD15 0x3c
#define NAC_CGU_DWORD15_SYNCE_UNLCK_THR_S 0
#define NAC_CGU_DWORD15_SYNCE_UNLCK_THR_M ICE_M(0x7, 0)
#define NAC_CGU_DWORD15_SYNCE_LOCK_THR_S 3
#define NAC_CGU_DWORD15_SYNCE_LOCK_THR_M ICE_M(0x1ffff, 3)
#define NAC_CGU_DWORD15_SYNCE_QUO_M1_S 20
#define NAC_CGU_DWORD15_SYNCE_QUO_M1_M ICE_M(0x3f, 20)
#define NAC_CGU_DWORD15_SYNCE_REMNDR_S 26
#define NAC_CGU_DWORD15_SYNCE_REMNDR_M ICE_M(0x3f, 26)
union nac_cgu_dword15 {
struct {
u32 synce_unlck_thr : 3;
u32 synce_lock_thr : 17;
u32 synce_quo_m1 : 6;
u32 synce_remndr : 6;
} field;
u32 val;
};
#define NAC_CGU_DWORD16 0x40
#define NAC_CGU_DWORD16_SYNCE_REMNDR_S 0
#define NAC_CGU_DWORD16_SYNCE_REMNDR_M ICE_M(0x3f, 0)
#define NAC_CGU_DWORD16_SYNCE_PHLMT_EN_S 6
#define NAC_CGU_DWORD16_SYNCE_PHLMT_EN_M BIT(6)
#define NAC_CGU_DWORD16_MISC13_S 7
#define NAC_CGU_DWORD16_MISC13_M ICE_M(0x1ffffff, 7)
union nac_cgu_dword16 {
struct {
u32 synce_remndr : 6;
u32 synce_phlmt_en : 1;
u32 misc13 : 25;
} field;
u32 val;
};
#define NAC_CGU_DWORD17 0x44
#define NAC_CGU_DWORD17_FDPLL_GAIN_S 0
#define NAC_CGU_DWORD17_FDPLL_GAIN_M ICE_M(0xf, 0)
#define NAC_CGU_DWORD17_FDPLL_SCALE_S 4
#define NAC_CGU_DWORD17_FDPLL_SCALE_M ICE_M(0xf, 4)
#define NAC_CGU_DWORD17_FDPLL_FGAIN_SHIFT_F_S 8
#define NAC_CGU_DWORD17_FDPLL_FGAIN_SHIFT_F_M ICE_M(0x3f, 8)
#define NAC_CGU_DWORD17_FDPLL_CLR_PHERR_S 14
#define NAC_CGU_DWORD17_FDPLL_CLR_PHERR_M BIT(14)
#define NAC_CGU_DWORD17_FDPLL_BB_EN_S 15
#define NAC_CGU_DWORD17_FDPLL_BB_EN_M BIT(15)
#define NAC_CGU_DWORD17_FDPLL_FGAIN_SHIFT_S 16
#define NAC_CGU_DWORD17_FDPLL_FGAIN_SHIFT_M ICE_M(0x3f, 16)
#define NAC_CGU_DWORD17_FDPLL_FSCALE_SHIFT_S 22
#define NAC_CGU_DWORD17_FDPLL_FSCALE_SHIFT_M ICE_M(0x1f, 22)
#define NAC_CGU_DWORD17_FDPLL_FSCALE_SHIFT_F_S 27
#define NAC_CGU_DWORD17_FDPLL_FSCALE_SHIFT_F_M ICE_M(0x1f, 27)
union nac_cgu_dword17 {
struct {
u32 fdpll_gain : 4;
u32 fdpll_scale : 4;
u32 fdpll_fgain_shift_f : 6;
u32 fdpll_clr_pherr : 1;
u32 fdpll_bb_en : 1;
u32 fdpll_fgain_shift : 6;
u32 fdpll_fscale_shift : 5;
u32 fdpll_fscale_shift_f : 5;
} field;
u32 val;
};
#define NAC_CGU_DWORD18 0x48
#define NAC_CGU_DWORD18_FDPLL_BYPASS_S 0
#define NAC_CGU_DWORD18_FDPLL_BYPASS_M BIT(0)
#define NAC_CGU_DWORD18_FDPLL_INP_NCO_S 1
#define NAC_CGU_DWORD18_FDPLL_INP_NCO_M ICE_M(0xff, 1)
#define NAC_CGU_DWORD18_FDPLL_AUTO_EN_S 9
#define NAC_CGU_DWORD18_FDPLL_AUTO_EN_M BIT(9)
#define NAC_CGU_DWORD18_FDPLL_SAMP_CNT_S 10
#define NAC_CGU_DWORD18_FDPLL_SAMP_CNT_M ICE_M(0xfff, 10)
#define NAC_CGU_DWORD18_FDPLL_LOCKCNT_S 22
#define NAC_CGU_DWORD18_FDPLL_LOCKCNT_M ICE_M(0x1f, 22)
#define NAC_CGU_DWORD18_FDPLL_LOCK_THR_S 27
#define NAC_CGU_DWORD18_FDPLL_LOCK_THR_M ICE_M(0x1f, 27)
union nac_cgu_dword18 {
struct {
u32 fdpll_bypass : 1;
u32 fdpll_inp_nco : 8;
u32 fdpll_auto_en : 1;
u32 fdpll_samp_cnt : 12;
u32 fdpll_lockcnt : 5;
u32 fdpll_lock_thr : 5;
} field;
u32 val;
};
#define NAC_CGU_DWORD19 0x4c
#define NAC_CGU_DWORD19_TSPLL_FBDIV_INTGR_S 0
#define NAC_CGU_DWORD19_TSPLL_FBDIV_INTGR_M ICE_M(0xff, 0)
#define NAC_CGU_DWORD19_FDPLL_ULCK_THR_S 8
#define NAC_CGU_DWORD19_FDPLL_ULCK_THR_M ICE_M(0x1f, 8)
#define NAC_CGU_DWORD19_MISC15_S 13
#define NAC_CGU_DWORD19_MISC15_M ICE_M(0x7, 13)
#define NAC_CGU_DWORD19_TSPLL_NDIVRATIO_S 16
#define NAC_CGU_DWORD19_TSPLL_NDIVRATIO_M ICE_M(0xf, 16)
#define NAC_CGU_DWORD19_TSPLL_IREF_NDIVRATIO_S 20
#define NAC_CGU_DWORD19_TSPLL_IREF_NDIVRATIO_M ICE_M(0x7, 20)
#define NAC_CGU_DWORD19_MISC19_S 23
#define NAC_CGU_DWORD19_MISC19_M BIT(23)
#define NAC_CGU_DWORD19_JAPLL_NDIVRATIO_S 24
#define NAC_CGU_DWORD19_JAPLL_NDIVRATIO_M ICE_M(0xf, 24)
#define NAC_CGU_DWORD19_JAPLL_IREF_NDIVRATIO_S 28
#define NAC_CGU_DWORD19_JAPLL_IREF_NDIVRATIO_M ICE_M(0x7, 28)
#define NAC_CGU_DWORD19_MISC27_S 31
#define NAC_CGU_DWORD19_MISC27_M BIT(31)
union nac_cgu_dword19 {
struct {
u32 tspll_fbdiv_intgr : 8;
@@ -441,76 +43,7 @@ union nac_cgu_dword19 {
u32 val;
};
#define NAC_CGU_DWORD20 0x50
#define NAC_CGU_DWORD20_JAPLL_INT_DIV_S 0
#define NAC_CGU_DWORD20_JAPLL_INT_DIV_M ICE_M(0xff, 0)
#define NAC_CGU_DWORD20_JAPLL_FRAC_DIV_S 8
#define NAC_CGU_DWORD20_JAPLL_FRAC_DIV_M ICE_M(0x3fffff, 8)
#define NAC_CGU_DWORD20_MISC16_S 30
#define NAC_CGU_DWORD20_MISC16_M ICE_M(0x3, 30)
union nac_cgu_dword20 {
struct {
u32 japll_int_div : 8;
u32 japll_frac_div : 22;
u32 misc16 : 2;
} field;
u32 val;
};
#define NAC_CGU_DWORD21 0x54
#define NAC_CGU_DWORD21_MISC17_S 0
#define NAC_CGU_DWORD21_MISC17_M ICE_M(0xf, 0)
#define NAC_CGU_DWORD21_FDPLL_INT_DIV_OUT_NC_S 4
#define NAC_CGU_DWORD21_FDPLL_INT_DIV_OUT_NC_M ICE_M(0xff, 4)
#define NAC_CGU_DWORD21_FDPLL_FRAC_DIV_OUT_NC_S 12
#define NAC_CGU_DWORD21_FDPLL_FRAC_DIV_OUT_NC_M ICE_M(0xfffff, 12)
union nac_cgu_dword21 {
struct {
u32 misc17 : 4;
u32 fdpll_int_div_out_nc : 8;
u32 fdpll_frac_div_out_nc : 20;
} field;
u32 val;
};
#define NAC_CGU_DWORD22 0x58
#define NAC_CGU_DWORD22_FDPLL_FRAC_DIV_OUT_NC_S 0
#define NAC_CGU_DWORD22_FDPLL_FRAC_DIV_OUT_NC_M ICE_M(0x3, 0)
#define NAC_CGU_DWORD22_FDPLL_LOCK_INT_FOR_S 2
#define NAC_CGU_DWORD22_FDPLL_LOCK_INT_FOR_M BIT(2)
#define NAC_CGU_DWORD22_SYNCE_HDOV_INT_FOR_S 3
#define NAC_CGU_DWORD22_SYNCE_HDOV_INT_FOR_M BIT(3)
#define NAC_CGU_DWORD22_SYNCE_LOCK_INT_FOR_S 4
#define NAC_CGU_DWORD22_SYNCE_LOCK_INT_FOR_M BIT(4)
#define NAC_CGU_DWORD22_FDPLL_PHLEAD_SLIP_NC_S 5
#define NAC_CGU_DWORD22_FDPLL_PHLEAD_SLIP_NC_M BIT(5)
#define NAC_CGU_DWORD22_FDPLL_ACC1_OVFL_NC_S 6
#define NAC_CGU_DWORD22_FDPLL_ACC1_OVFL_NC_M BIT(6)
#define NAC_CGU_DWORD22_FDPLL_ACC2_OVFL_NC_S 7
#define NAC_CGU_DWORD22_FDPLL_ACC2_OVFL_NC_M BIT(7)
#define NAC_CGU_DWORD22_SYNCE_STATUS_NC_S 8
#define NAC_CGU_DWORD22_SYNCE_STATUS_NC_M ICE_M(0x3f, 8)
#define NAC_CGU_DWORD22_FDPLL_ACC1F_OVFL_S 14
#define NAC_CGU_DWORD22_FDPLL_ACC1F_OVFL_M BIT(14)
#define NAC_CGU_DWORD22_MISC18_S 15
#define NAC_CGU_DWORD22_MISC18_M BIT(15)
#define NAC_CGU_DWORD22_FDPLLCLK_DIV_S 16
#define NAC_CGU_DWORD22_FDPLLCLK_DIV_M ICE_M(0xf, 16)
#define NAC_CGU_DWORD22_TIME1588CLK_DIV_S 20
#define NAC_CGU_DWORD22_TIME1588CLK_DIV_M ICE_M(0xf, 20)
#define NAC_CGU_DWORD22_SYNCECLK_DIV_S 24
#define NAC_CGU_DWORD22_SYNCECLK_DIV_M ICE_M(0xf, 24)
#define NAC_CGU_DWORD22_SYNCECLK_SEL_DIV2_S 28
#define NAC_CGU_DWORD22_SYNCECLK_SEL_DIV2_M BIT(28)
#define NAC_CGU_DWORD22_FDPLLCLK_SEL_DIV2_S 29
#define NAC_CGU_DWORD22_FDPLLCLK_SEL_DIV2_M BIT(29)
#define NAC_CGU_DWORD22_TIME1588CLK_SEL_DIV2_S 30
#define NAC_CGU_DWORD22_TIME1588CLK_SEL_DIV2_M BIT(30)
#define NAC_CGU_DWORD22_MISC3_S 31
#define NAC_CGU_DWORD22_MISC3_M BIT(31)
union nac_cgu_dword22 {
struct {
u32 fdpll_frac_div_out_nc : 2;
@@ -535,21 +68,6 @@ union nac_cgu_dword22 {
};
#define NAC_CGU_DWORD24 0x60
#define NAC_CGU_DWORD24_TSPLL_FBDIV_FRAC_S 0
#define NAC_CGU_DWORD24_TSPLL_FBDIV_FRAC_M ICE_M(0x3fffff, 0)
#define NAC_CGU_DWORD24_MISC20_S 22
#define NAC_CGU_DWORD24_MISC20_M ICE_M(0x3, 22)
#define NAC_CGU_DWORD24_TS_PLL_ENABLE_S 24
#define NAC_CGU_DWORD24_TS_PLL_ENABLE_M BIT(24)
#define NAC_CGU_DWORD24_TIME_SYNC_TSPLL_ALIGN_SEL_S 25
#define NAC_CGU_DWORD24_TIME_SYNC_TSPLL_ALIGN_SEL_M BIT(25)
#define NAC_CGU_DWORD24_EXT_SYNCE_SEL_S 26
#define NAC_CGU_DWORD24_EXT_SYNCE_SEL_M BIT(26)
#define NAC_CGU_DWORD24_REF1588_CK_DIV_S 27
#define NAC_CGU_DWORD24_REF1588_CK_DIV_M ICE_M(0xf, 27)
#define NAC_CGU_DWORD24_TIME_REF_SEL_S 31
#define NAC_CGU_DWORD24_TIME_REF_SEL_M BIT(31)
union nac_cgu_dword24 {
struct {
u32 tspll_fbdiv_frac : 22;
@@ -564,25 +82,6 @@ union nac_cgu_dword24 {
};
#define TSPLL_CNTR_BIST_SETTINGS 0x344
#define TSPLL_CNTR_BIST_SETTINGS_I_IREFGEN_SETTLING_TIME_CNTR_7_0_S 0
#define TSPLL_CNTR_BIST_SETTINGS_I_IREFGEN_SETTLING_TIME_CNTR_7_0_M \
ICE_M(0xff, 0)
#define TSPLL_CNTR_BIST_SETTINGS_I_IREFGEN_SETTLING_TIME_RO_STANDBY_1_0_S 8
#define TSPLL_CNTR_BIST_SETTINGS_I_IREFGEN_SETTLING_TIME_RO_STANDBY_1_0_M \
ICE_M(0x3, 8)
#define TSPLL_CNTR_BIST_SETTINGS_RESERVED195_S 10
#define TSPLL_CNTR_BIST_SETTINGS_RESERVED195_M ICE_M(0x1f, 10)
#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_SEL_0_S 15
#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_SEL_0_M BIT(15)
#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_SEL_1_S 16
#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_SEL_1_M BIT(16)
#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_CNT_6_0_S 17
#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_CNT_6_0_M ICE_M(0x7f, 17)
#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_CNT_10_7_S 24
#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_CNT_10_7_M ICE_M(0xf, 24)
#define TSPLL_CNTR_BIST_SETTINGS_RESERVED200_S 28
#define TSPLL_CNTR_BIST_SETTINGS_RESERVED200_M ICE_M(0xf, 28)
union tspll_cntr_bist_settings {
struct {
u32 i_irefgen_settling_time_cntr_7_0 : 8;
@@ -598,27 +97,6 @@ union tspll_cntr_bist_settings {
};
#define TSPLL_RO_BWM_LF 0x370
#define TSPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_7_0_S 0
#define TSPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_7_0_M ICE_M(0xff, 0)
#define TSPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_9_8_S 8
#define TSPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_9_8_M ICE_M(0x3, 8)
#define TSPLL_RO_BWM_LF_BIASCALDONE_CRI_S 10
#define TSPLL_RO_BWM_LF_BIASCALDONE_CRI_M BIT(10)
#define TSPLL_RO_BWM_LF_PLLLOCK_GAIN_TRAN_CRI_S 11
#define TSPLL_RO_BWM_LF_PLLLOCK_GAIN_TRAN_CRI_M BIT(11)
#define TSPLL_RO_BWM_LF_PLLLOCK_TRUE_LOCK_CRI_S 12
#define TSPLL_RO_BWM_LF_PLLLOCK_TRUE_LOCK_CRI_M BIT(12)
#define TSPLL_RO_BWM_LF_PLLUNLOCK_FLAG_CRI_S 13
#define TSPLL_RO_BWM_LF_PLLUNLOCK_FLAG_CRI_M BIT(13)
#define TSPLL_RO_BWM_LF_AFCERR_CRI_S 14
#define TSPLL_RO_BWM_LF_AFCERR_CRI_M BIT(14)
#define TSPLL_RO_BWM_LF_AFCDONE_CRI_S 15
#define TSPLL_RO_BWM_LF_AFCDONE_CRI_M BIT(15)
#define TSPLL_RO_BWM_LF_FEEDFWRDGAIN_CAL_CRI_7_0_S 16
#define TSPLL_RO_BWM_LF_FEEDFWRDGAIN_CAL_CRI_7_0_M ICE_M(0xff, 16)
#define TSPLL_RO_BWM_LF_M2FBDIVMOD_CRI_7_0_S 24
#define TSPLL_RO_BWM_LF_M2FBDIVMOD_CRI_7_0_M ICE_M(0xff, 24)
union tspll_ro_bwm_lf {
struct {
u32 bw_freqov_high_cri_7_0 : 8;
@@ -635,307 +113,4 @@ union tspll_ro_bwm_lf {
u32 val;
};
#define JAPLL_DIV0 0x400
#define JAPLL_DIV0_I_FBDIV_INTGR_7_0_S 0
#define JAPLL_DIV0_I_FBDIV_INTGR_7_0_M ICE_M(0xff, 0)
#define JAPLL_DIV0_I_FBDIV_FRAC_7_0_S 8
#define JAPLL_DIV0_I_FBDIV_FRAC_7_0_M ICE_M(0xff, 8)
#define JAPLL_DIV0_I_FBDIV_FRAC_15_8_S 16
#define JAPLL_DIV0_I_FBDIV_FRAC_15_8_M ICE_M(0xff, 16)
#define JAPLL_DIV0_I_FBDIV_FRAC_21_16_S 24
#define JAPLL_DIV0_I_FBDIV_FRAC_21_16_M ICE_M(0x3f, 24)
#define JAPLL_DIV0_I_FRACNEN_H_S 30
#define JAPLL_DIV0_I_FRACNEN_H_M BIT(30)
#define JAPLL_DIV0_I_DIRECT_PIN_IF_EN_S 31
#define JAPLL_DIV0_I_DIRECT_PIN_IF_EN_M BIT(31)
union japll_div0 {
struct {
u32 i_fbdiv_intgr_7_0 : 8;
u32 i_fbdiv_frac_7_0 : 8;
u32 i_fbdiv_frac_15_8 : 8;
u32 i_fbdiv_frac_21_16 : 6;
u32 i_fracnen_h : 1;
u32 i_direct_pin_if_en : 1;
} field;
u32 val;
};
#define JAPLL_LF 0x408
#define JAPLL_LF_I_PROP_COEFF_3_0_S 0
#define JAPLL_LF_I_PROP_COEFF_3_0_M ICE_M(0xf, 0)
#define JAPLL_LF_I_FLL_INT_COEFF_3_0_S 4
#define JAPLL_LF_I_FLL_INT_COEFF_3_0_M ICE_M(0xf, 4)
#define JAPLL_LF_I_INT_COEFF_4_0_S 8
#define JAPLL_LF_I_INT_COEFF_4_0_M ICE_M(0x1f, 8)
#define JAPLL_LF_I_FLL_EN_H_S 13
#define JAPLL_LF_I_FLL_EN_H_M BIT(13)
#define JAPLL_LF_I_TDC_FINE_RES_S 14
#define JAPLL_LF_I_TDC_FINE_RES_M BIT(14)
#define JAPLL_LF_I_DCOFINE_RESOLUTION_S 15
#define JAPLL_LF_I_DCOFINE_RESOLUTION_M BIT(15)
#define JAPLL_LF_I_GAINCTRL_2_0_S 16
#define JAPLL_LF_I_GAINCTRL_2_0_M ICE_M(0x7, 16)
#define JAPLL_LF_I_AFC_DIVRATIO_S 19
#define JAPLL_LF_I_AFC_DIVRATIO_M BIT(19)
#define JAPLL_LF_I_AFCCNTSEL_S 20
#define JAPLL_LF_I_AFCCNTSEL_M BIT(20)
#define JAPLL_LF_I_AFC_STARTUP_1_0_S 21
#define JAPLL_LF_I_AFC_STARTUP_1_0_M ICE_M(0x3, 21)
#define JAPLL_LF_RESERVED31_S 23
#define JAPLL_LF_RESERVED31_M BIT(23)
#define JAPLL_LF_I_TDCTARGETCNT_7_0_S 24
#define JAPLL_LF_I_TDCTARGETCNT_7_0_M ICE_M(0xff, 24)
union japll_lf {
struct {
u32 i_prop_coeff_3_0 : 4;
u32 i_fll_int_coeff_3_0 : 4;
u32 i_int_coeff_4_0 : 5;
u32 i_fll_en_h : 1;
u32 i_tdc_fine_res : 1;
u32 i_dcofine_resolution : 1;
u32 i_gainctrl_2_0 : 3;
u32 i_afc_divratio : 1;
u32 i_afccntsel : 1;
u32 i_afc_startup_1_0 : 2;
u32 reserved31 : 1;
u32 i_tdctargetcnt_7_0 : 8;
} field;
u32 val;
};
#define JAPLL_FRAC_LOCK 0x40c
#define JAPLL_FRAC_LOCK_I_FEEDFWRDGAIN_7_0_S 0
#define JAPLL_FRAC_LOCK_I_FEEDFWRDGAIN_7_0_M ICE_M(0xff, 0)
#define JAPLL_FRAC_LOCK_I_FEEDFWRDCAL_EN_H_S 8
#define JAPLL_FRAC_LOCK_I_FEEDFWRDCAL_EN_H_M BIT(8)
#define JAPLL_FRAC_LOCK_I_FEEDFWRDCAL_PAUSE_H_S 9
#define JAPLL_FRAC_LOCK_I_FEEDFWRDCAL_PAUSE_H_M BIT(9)
#define JAPLL_FRAC_LOCK_I_DCODITHEREN_H_S 10
#define JAPLL_FRAC_LOCK_I_DCODITHEREN_H_M BIT(10)
#define JAPLL_FRAC_LOCK_I_LOCKTHRESH_3_0_S 11
#define JAPLL_FRAC_LOCK_I_LOCKTHRESH_3_0_M ICE_M(0xf, 11)
#define JAPLL_FRAC_LOCK_I_DCODITHER_CONFIG_S 15
#define JAPLL_FRAC_LOCK_I_DCODITHER_CONFIG_M BIT(15)
#define JAPLL_FRAC_LOCK_I_EARLYLOCK_CRITERIA_1_0_S 16
#define JAPLL_FRAC_LOCK_I_EARLYLOCK_CRITERIA_1_0_M ICE_M(0x3, 16)
#define JAPLL_FRAC_LOCK_I_TRUELOCK_CRITERIA_1_0_S 18
#define JAPLL_FRAC_LOCK_I_TRUELOCK_CRITERIA_1_0_M ICE_M(0x3, 18)
#define JAPLL_FRAC_LOCK_I_LF_HALF_CYC_EN_S 20
#define JAPLL_FRAC_LOCK_I_LF_HALF_CYC_EN_M BIT(20)
#define JAPLL_FRAC_LOCK_I_DITHER_OVRD_S 21
#define JAPLL_FRAC_LOCK_I_DITHER_OVRD_M BIT(21)
#define JAPLL_FRAC_LOCK_I_PLLLC_RESTORE_REG_S 22
#define JAPLL_FRAC_LOCK_I_PLLLC_RESTORE_REG_M BIT(22)
#define JAPLL_FRAC_LOCK_I_PLLLC_RESTORE_MODE_CTRL_S 23
#define JAPLL_FRAC_LOCK_I_PLLLC_RESTORE_MODE_CTRL_M BIT(23)
#define JAPLL_FRAC_LOCK_I_PLLRAMPEN_H_S 24
#define JAPLL_FRAC_LOCK_I_PLLRAMPEN_H_M BIT(24)
#define JAPLL_FRAC_LOCK_I_FBDIV_STROBE_H_S 25
#define JAPLL_FRAC_LOCK_I_FBDIV_STROBE_H_M BIT(25)
#define JAPLL_FRAC_LOCK_I_OVC_SNAPSHOT_H_S 26
#define JAPLL_FRAC_LOCK_I_OVC_SNAPSHOT_H_M BIT(26)
#define JAPLL_FRAC_LOCK_I_DITHER_VALUE_4_0_S 27
#define JAPLL_FRAC_LOCK_I_DITHER_VALUE_4_0_M ICE_M(0x1f, 27)
union japll_frac_lock {
struct {
u32 i_feedfwrdgain_7_0 : 8;
u32 i_feedfwrdcal_en_h : 1;
u32 i_feedfwrdcal_pause_h : 1;
u32 i_dcoditheren_h : 1;
u32 i_lockthresh_3_0 : 4;
u32 i_dcodither_config : 1;
u32 i_earlylock_criteria_1_0 : 2;
u32 i_truelock_criteria_1_0 : 2;
u32 i_lf_half_cyc_en : 1;
u32 i_dither_ovrd : 1;
u32 i_plllc_restore_reg : 1;
u32 i_plllc_restore_mode_ctrl : 1;
u32 i_pllrampen_h : 1;
u32 i_fbdiv_strobe_h : 1;
u32 i_ovc_snapshot_h : 1;
u32 i_dither_value_4_0 : 5;
} field;
u32 val;
};
#define JAPLL_BIAS 0x414
#define JAPLL_BIAS_I_IREFTRIM_4_0_S 0
#define JAPLL_BIAS_I_IREFTRIM_4_0_M ICE_M(0x1f, 0)
#define JAPLL_BIAS_I_VREF_RDAC_2_0_S 5
#define JAPLL_BIAS_I_VREF_RDAC_2_0_M ICE_M(0x7, 5)
#define JAPLL_BIAS_I_CTRIM_4_0_S 8
#define JAPLL_BIAS_I_CTRIM_4_0_M ICE_M(0x1f, 8)
#define JAPLL_BIAS_I_IREF_REFCLK_MODE_1_0_S 13
#define JAPLL_BIAS_I_IREF_REFCLK_MODE_1_0_M ICE_M(0x3, 13)
#define JAPLL_BIAS_I_BIASCAL_EN_H_S 15
#define JAPLL_BIAS_I_BIASCAL_EN_H_M BIT(15)
#define JAPLL_BIAS_I_BIAS_BONUS_7_0_S 16
#define JAPLL_BIAS_I_BIAS_BONUS_7_0_M ICE_M(0xff, 16)
#define JAPLL_BIAS_I_INIT_DCOAMP_5_0_S 24
#define JAPLL_BIAS_I_INIT_DCOAMP_5_0_M ICE_M(0x3f, 24)
#define JAPLL_BIAS_I_BIAS_GB_SEL_1_0_S 30
#define JAPLL_BIAS_I_BIAS_GB_SEL_1_0_M ICE_M(0x3, 30)
union japll_bias {
struct {
u32 i_ireftrim_4_0 : 5;
u32 i_vref_rdac_2_0 : 3;
u32 i_ctrim_4_0 : 5;
u32 i_iref_refclk_mode_1_0 : 2;
u32 i_biascal_en_h : 1;
u32 i_bias_bonus_7_0 : 8;
u32 i_init_dcoamp_5_0 : 6;
u32 i_bias_gb_sel_1_0 : 2;
} field;
u32 val;
};
#define JAPLL_TDC_COLDST_BIAS 0x418
#define JAPLL_TDC_COLDST_BIAS_I_TDCSEL_1_0_S 0
#define JAPLL_TDC_COLDST_BIAS_I_TDCSEL_1_0_M ICE_M(0x3, 0)
#define JAPLL_TDC_COLDST_BIAS_I_TDCOVCCORR_EN_H_S 2
#define JAPLL_TDC_COLDST_BIAS_I_TDCOVCCORR_EN_H_M BIT(2)
#define JAPLL_TDC_COLDST_BIAS_I_TDCDC_EN_H_S 3
#define JAPLL_TDC_COLDST_BIAS_I_TDCDC_EN_H_M BIT(3)
#define JAPLL_TDC_COLDST_BIAS_I_TDC_OFFSET_LOCK_1_0_S 4
#define JAPLL_TDC_COLDST_BIAS_I_TDC_OFFSET_LOCK_1_0_M ICE_M(0x3, 4)
#define JAPLL_TDC_COLDST_BIAS_I_SWCAP_IREFGEN_CLKMODE_1_0_S 6
#define JAPLL_TDC_COLDST_BIAS_I_SWCAP_IREFGEN_CLKMODE_1_0_M ICE_M(0x3, 6)
#define JAPLL_TDC_COLDST_BIAS_I_BB_GAIN_2_0_S 8
#define JAPLL_TDC_COLDST_BIAS_I_BB_GAIN_2_0_M ICE_M(0x7, 8)
#define JAPLL_TDC_COLDST_BIAS_I_BBTHRESH_3_0_S 11
#define JAPLL_TDC_COLDST_BIAS_I_BBTHRESH_3_0_M ICE_M(0xf, 11)
#define JAPLL_TDC_COLDST_BIAS_I_BBINLOCK_H_S 15
#define JAPLL_TDC_COLDST_BIAS_I_BBINLOCK_H_M BIT(15)
#define JAPLL_TDC_COLDST_BIAS_I_COLDSTART_S 16
#define JAPLL_TDC_COLDST_BIAS_I_COLDSTART_M BIT(16)
#define JAPLL_TDC_COLDST_BIAS_I_IREFBIAS_STARTUP_PULSE_WIDTH_1_0_S 17
#define JAPLL_TDC_COLDST_BIAS_I_IREFBIAS_STARTUP_PULSE_WIDTH_1_0_M \
ICE_M(0x3, 17)
#define JAPLL_TDC_COLDST_BIAS_I_DCO_SETTLING_TIME_CNTR_3_0_S 19
#define JAPLL_TDC_COLDST_BIAS_I_DCO_SETTLING_TIME_CNTR_3_0_M ICE_M(0xf, 19)
#define JAPLL_TDC_COLDST_BIAS_I_IREFBIAS_STARTUP_PULSE_BYPASS_S 23
#define JAPLL_TDC_COLDST_BIAS_I_IREFBIAS_STARTUP_PULSE_BYPASS_M BIT(23)
#define JAPLL_TDC_COLDST_BIAS_I_BIAS_CALIB_STEPSIZE_1_0_S 24
#define JAPLL_TDC_COLDST_BIAS_I_BIAS_CALIB_STEPSIZE_1_0_M ICE_M(0x3, 24)
#define JAPLL_TDC_COLDST_BIAS_RESERVED81_S 26
#define JAPLL_TDC_COLDST_BIAS_RESERVED81_M BIT(26)
#define JAPLL_TDC_COLDST_BIAS_I_IREFINT_EN_S 27
#define JAPLL_TDC_COLDST_BIAS_I_IREFINT_EN_M BIT(27)
#define JAPLL_TDC_COLDST_BIAS_I_VGSBUFEN_S 28
#define JAPLL_TDC_COLDST_BIAS_I_VGSBUFEN_M BIT(28)
#define JAPLL_TDC_COLDST_BIAS_I_DIGDFTSWEP_S 29
#define JAPLL_TDC_COLDST_BIAS_I_DIGDFTSWEP_M BIT(29)
#define JAPLL_TDC_COLDST_BIAS_I_IREFDIGDFTEN_S 30
#define JAPLL_TDC_COLDST_BIAS_I_IREFDIGDFTEN_M BIT(30)
#define JAPLL_TDC_COLDST_BIAS_I_IREF_REFCLK_INV_EN_S 31
#define JAPLL_TDC_COLDST_BIAS_I_IREF_REFCLK_INV_EN_M BIT(31)
union japll_tdc_coldst_bias {
struct {
u32 i_tdcsel_1_0 : 2;
u32 i_tdcovccorr_en_h : 1;
u32 i_tdcdc_en_h : 1;
u32 i_tdc_offset_lock_1_0 : 2;
u32 i_swcap_irefgen_clkmode_1_0 : 2;
u32 i_bb_gain_2_0 : 3;
u32 i_bbthresh_3_0 : 4;
u32 i_bbinlock_h : 1;
u32 i_coldstart : 1;
u32 i_irefbias_startup_pulse_width_1_0 : 2;
u32 i_dco_settling_time_cntr_3_0 : 4;
u32 i_irefbias_startup_pulse_bypass : 1;
u32 i_bias_calib_stepsize_1_0 : 2;
u32 reserved81 : 1;
u32 i_irefint_en : 1;
u32 i_vgsbufen : 1;
u32 i_digdftswep : 1;
u32 i_irefdigdften : 1;
u32 i_iref_refclk_inv_en : 1;
} field;
u32 val;
};
#define JAPLL_DFX_DCO 0x424
#define JAPLL_DFX_DCO_I_DCOFINEDFTSEL_1_0_S 0
#define JAPLL_DFX_DCO_I_DCOFINEDFTSEL_1_0_M ICE_M(0x3, 0)
#define JAPLL_DFX_DCO_I_DCOCOARSE_OVRD_H_S 2
#define JAPLL_DFX_DCO_I_DCOCOARSE_OVRD_H_M BIT(2)
#define JAPLL_DFX_DCO_I_BIAS_FILTER_EN_S 3
#define JAPLL_DFX_DCO_I_BIAS_FILTER_EN_M BIT(3)
#define JAPLL_DFX_DCO_I_PLLPWRMODE_1_0_S 4
#define JAPLL_DFX_DCO_I_PLLPWRMODE_1_0_M ICE_M(0x3, 4)
#define JAPLL_DFX_DCO_I_DCOAMP_STATICLEG_CFG_1_0_S 6
#define JAPLL_DFX_DCO_I_DCOAMP_STATICLEG_CFG_1_0_M ICE_M(0x3, 6)
#define JAPLL_DFX_DCO_I_DCOFINE_7_0_S 8
#define JAPLL_DFX_DCO_I_DCOFINE_7_0_M ICE_M(0xff, 8)
#define JAPLL_DFX_DCO_I_DCOFINE_9_8_S 16
#define JAPLL_DFX_DCO_I_DCOFINE_9_8_M ICE_M(0x3, 16)
#define JAPLL_DFX_DCO_I_DCOAMPOVRDEN_H_S 18
#define JAPLL_DFX_DCO_I_DCOAMPOVRDEN_H_M BIT(18)
#define JAPLL_DFX_DCO_I_DCOAMP_3_0_S 19
#define JAPLL_DFX_DCO_I_DCOAMP_3_0_M ICE_M(0xf, 19)
#define JAPLL_DFX_DCO_I_BIASFILTER_EN_DELAY_S 23
#define JAPLL_DFX_DCO_I_BIASFILTER_EN_DELAY_M BIT(23)
#define JAPLL_DFX_DCO_I_DCOCOARSE_7_0_S 24
#define JAPLL_DFX_DCO_I_DCOCOARSE_7_0_M ICE_M(0xff, 24)
union japll_dfx_dco {
struct {
u32 i_dcofinedftsel_1_0 : 2;
u32 i_dcocoarse_ovrd_h : 1;
u32 i_bias_filter_en : 1;
u32 i_pllpwrmode_1_0 : 2;
u32 i_dcoamp_staticleg_cfg_1_0 : 2;
u32 i_dcofine_7_0 : 8;
u32 i_dcofine_9_8 : 2;
u32 i_dcoampovrden_h : 1;
u32 i_dcoamp_3_0 : 4;
u32 i_biasfilter_en_delay : 1;
u32 i_dcocoarse_7_0 : 8;
} field;
u32 val;
};
#define JAPLL_RO_BWM_LF 0x470
#define JAPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_7_0_S 0
#define JAPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_7_0_M ICE_M(0xff, 0)
#define JAPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_9_8_S 8
#define JAPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_9_8_M ICE_M(0x3, 8)
#define JAPLL_RO_BWM_LF_BIASCALDONE_CRI_S 10
#define JAPLL_RO_BWM_LF_BIASCALDONE_CRI_M BIT(10)
#define JAPLL_RO_BWM_LF_PLLLOCK_GAIN_TRAN_CRI_S 11
#define JAPLL_RO_BWM_LF_PLLLOCK_GAIN_TRAN_CRI_M BIT(11)
#define JAPLL_RO_BWM_LF_PLLLOCK_TRUE_LOCK_CRI_S 12
#define JAPLL_RO_BWM_LF_PLLLOCK_TRUE_LOCK_CRI_M BIT(12)
#define JAPLL_RO_BWM_LF_PLLUNLOCK_FLAG_CRI_S 13
#define JAPLL_RO_BWM_LF_PLLUNLOCK_FLAG_CRI_M BIT(13)
#define JAPLL_RO_BWM_LF_AFCERR_CRI_S 14
#define JAPLL_RO_BWM_LF_AFCERR_CRI_M BIT(14)
#define JAPLL_RO_BWM_LF_AFCDONE_CRI_S 15
#define JAPLL_RO_BWM_LF_AFCDONE_CRI_M BIT(15)
#define JAPLL_RO_BWM_LF_FEEDFWRDGAIN_CAL_CRI_7_0_S 16
#define JAPLL_RO_BWM_LF_FEEDFWRDGAIN_CAL_CRI_7_0_M ICE_M(0xff, 16)
#define JAPLL_RO_BWM_LF_M2FBDIVMOD_CRI_7_0_S 24
#define JAPLL_RO_BWM_LF_M2FBDIVMOD_CRI_7_0_M ICE_M(0xff, 24)
union japll_ro_bwm_lf {
struct {
u32 bw_freqov_high_cri_7_0 : 8;
u32 bw_freqov_high_cri_9_8 : 2;
u32 biascaldone_cri : 1;
u32 plllock_gain_tran_cri : 1;
u32 plllock_true_lock_cri : 1;
u32 pllunlock_flag_cri : 1;
u32 afcerr_cri : 1;
u32 afcdone_cri : 1;
u32 feedfwrdgain_cal_cri_7_0 : 8;
u32 m2fbdivmod_cri_7_0 : 8;
} field;
u32 val;
};
#endif /* _ICE_CGU_REGS_H_ */

drivers/thirdparty/ice/ice_cgu_util.c vendored
@@ -1,444 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice.h"
/**
* ice_cgu_reg_read - Read a CGU register
* @pf: Board private structure
* @reg: Register to read from
* @val: Pointer to the value to read (out param)
*/
int ice_cgu_reg_read(struct ice_pf *pf, u32 reg, u32 *val)
{
struct ice_sbq_msg_input cgu_msg;
int status;
cgu_msg.opcode = ice_sbq_msg_rd;
cgu_msg.dest_dev = cgu;
cgu_msg.msg_addr_low = reg;
cgu_msg.msg_addr_high = 0x0;
status = ice_sbq_rw_reg_lp(&pf->hw, &cgu_msg, true);
if (status) {
dev_dbg(ice_pf_to_dev(pf), "addr 0x%04x, val 0x%08x\n", reg, cgu_msg.data);
return -EIO;
}
*val = cgu_msg.data;
return 0;
}
/**
* ice_cgu_reg_write - Write a CGU register
* @pf: Board private structure
* @reg: Register to write to
* @val: Value to write
*/
int ice_cgu_reg_write(struct ice_pf *pf, u32 reg, u32 val)
{
struct ice_sbq_msg_input cgu_msg;
int status;
cgu_msg.opcode = ice_sbq_msg_wr;
cgu_msg.dest_dev = cgu;
cgu_msg.msg_addr_low = reg;
cgu_msg.msg_addr_high = 0x0;
cgu_msg.data = val;
dev_dbg(ice_pf_to_dev(pf), "addr 0x%04x, val 0x%08x\n", reg, val);
status = ice_sbq_rw_reg_lp(&pf->hw, &cgu_msg, true);
if (status)
return -EIO;
return 0;
}
/**
* ice_cgu_set_gnd - Ground the refclk
* @pf: Board private structure
* @enable: True to ground the refclk
*/
int ice_cgu_set_gnd(struct ice_pf *pf, bool enable)
{
int status = 0;
union nac_cgu_dword10 dw10;
int i;
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val);
if (status)
goto err;
if (enable)
dw10.field.synce_sel_gnd = 1;
else
dw10.field.synce_sel_gnd = 0;
status = ice_cgu_reg_write(pf, NAC_CGU_DWORD10, dw10.val);
if (status)
goto err;
for (i = 0; i < 3; i++)
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val);
if (status)
goto err;
err:
return status;
}
/**
* ice_cgu_set_byp - Set the DPLL bypass
* @pf: Board private structure
* @enable: True to enable bypass
*/
int ice_cgu_set_byp(struct ice_pf *pf, bool enable)
{
union nac_cgu_dword12 dw12;
int status = 0;
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD12, &dw12.val);
if (status)
goto err;
if (enable)
dw12.field.synce_dpll_byp = 1;
else
dw12.field.synce_dpll_byp = 0;
status = ice_cgu_reg_write(pf, NAC_CGU_DWORD12, dw12.val);
if (status)
goto err;
err:
return status;
}
/**
* ice_cgu_set_holdover_lock_irq - Enable/disable the holdover and lock interrupts
* @pf: Board private structure
* @enable: True to enable the holdover/lock interrupts, false to disable them
*/
int ice_cgu_set_holdover_lock_irq(struct ice_pf *pf, bool enable)
{
union nac_cgu_dword13 dw13;
int status;
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD13, &dw13.val);
if (status)
goto err;
/* the *_int_enb bits are defined opposite of what one would expect.
* 0 = enabled, 1 = disabled
*/
if (enable) {
dw13.field.synce_hdov_int_enb = 0;
dw13.field.synce_lock_int_enb = 0;
} else {
dw13.field.synce_hdov_int_enb = 1;
dw13.field.synce_lock_int_enb = 1;
}
status = ice_cgu_reg_write(pf, NAC_CGU_DWORD13, dw13.val);
if (status)
goto err;
err:
return status;
}
/**
* ice_cgu_mux_sel_set_reg - Write to selected mux register
* @pf: Board private structure
* @mux_sel: Target mux
* @val: Value to write to
*/
int ice_cgu_mux_sel_set_reg(struct ice_pf *pf, enum ice_cgu_mux_sel mux_sel, u32 val)
{
union nac_cgu_dword10 dw10;
union nac_cgu_dword11 dw11;
int status;
switch (mux_sel) {
case ICE_CGU_MUX_SEL_REF_CLK:
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val);
if (status)
goto err;
dw10.field.synce_s_ref_clk = val;
status = ice_cgu_reg_write(pf, NAC_CGU_DWORD10, dw10.val);
if (status)
goto err;
break;
case ICE_CGU_MUX_SEL_BYPASS_CLK:
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD11, &dw11.val);
if (status)
goto err;
dw11.field.synce_s_byp_clk = val;
status = ice_cgu_reg_write(pf, NAC_CGU_DWORD11, dw11.val);
if (status)
goto err;
break;
case ICE_CGU_MUX_SEL_ETHCLKO:
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val);
if (status)
goto err;
dw10.field.synce_ethclko_sel = val;
status = ice_cgu_reg_write(pf, NAC_CGU_DWORD10, dw10.val);
if (status)
goto err;
break;
case ICE_CGU_MUX_SEL_CLKO:
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val);
if (status)
goto err;
dw10.field.synce_clko_sel = val;
status = ice_cgu_reg_write(pf, NAC_CGU_DWORD10, dw10.val);
if (status)
goto err;
break;
default:
dev_err(ice_pf_to_dev(pf), "internal error -- invalid mux!\n");
return -EIO;
}
err:
return status;
}
/**
* ice_cgu_dck_rst_assert_release - Assert or release the dck reset
* @pf: Board private structure
* @assert: True to assert, false to release
*/
int ice_cgu_dck_rst_assert_release(struct ice_pf *pf, bool assert)
{
union nac_cgu_dword10 dw10;
int status = 0;
int i;
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val);
if (status)
goto err;
if (assert)
dw10.field.synce_dck_rst = 1;
else
dw10.field.synce_dck_rst = 0;
status = ice_cgu_reg_write(pf, NAC_CGU_DWORD10, dw10.val);
if (status)
goto err;
for (i = 0; i < 3; i++)
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val);
if (status)
goto err;
err:
return status;
}
/**
* ice_cgu_dck2_rst_assert_release - Assert or release the dck2 reset
* @pf: Board private structure
* @assert: True to assert, false to release
*/
int ice_cgu_dck2_rst_assert_release(struct ice_pf *pf, bool assert)
{
union nac_cgu_dword10 dw10;
int status = 0;
int i;
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val);
if (status)
goto err;
if (assert)
dw10.field.synce_dck2_rst = 1;
else
dw10.field.synce_dck2_rst = 0;
status = ice_cgu_reg_write(pf, NAC_CGU_DWORD10, dw10.val);
if (status)
goto err;
for (i = 0; i < 3; i++)
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val);
if (status)
goto err;
err:
return status;
}
/**
* ice_cgu_mck_rst_assert_release - Assert or release the mck reset
* @pf: Board private structure
* @assert: True to assert, false to release
*/
int ice_cgu_mck_rst_assert_release(struct ice_pf *pf, bool assert)
{
union nac_cgu_dword12 dw12;
int status = 0;
int i;
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD12, &dw12.val);
if (status)
goto err;
if (assert)
dw12.field.synce_mck_rst = 1;
else
dw12.field.synce_mck_rst = 0;
status = ice_cgu_reg_write(pf, NAC_CGU_DWORD12, dw12.val);
if (status)
goto err;
for (i = 0; i < 3; i++)
status = ice_cgu_reg_read(pf, NAC_CGU_DWORD12, &dw12.val);
if (status)
goto err;
err:
return status;
}
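/* The read-modify-write helpers above share one skeleton; a hedged sketch
 * (helper name illustrative) of how they could fold into a single routine
 * driven by the *_M mask macros from ice_cgu_regs.h, e.g.
 * example_cgu_rmw(pf, NAC_CGU_DWORD10, NAC_CGU_DWORD10_SYNCE_DCK_RST_M, assert):
 */
static int example_cgu_rmw(struct ice_pf *pf, u32 reg, u32 mask, bool set)
{
	u32 val;
	int status;
	int i;

	status = ice_cgu_reg_read(pf, reg, &val);
	if (status)
		return status;
	val = set ? (val | mask) : (val & ~mask);
	status = ice_cgu_reg_write(pf, reg, val);
	if (status)
		return status;
	/* the originals follow the write with a few extra reads, apparently
	 * to let the new value settle before the caller proceeds
	 */
	for (i = 0; i < 3; i++) {
		status = ice_cgu_reg_read(pf, reg, &val);
		if (status)
			return status;
	}
	return 0;
}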
/**
* ice_cgu_usleep - Sleep for a specified period of time
* @usec: Time to sleep in microseconds
*/
void ice_cgu_usleep(u64 usec)
{
if (usec <= 10) {
udelay(usec);
} else if (usec <= 20000) {
usleep_range(usec, usec + 10);
} else {
int msec;
msec = (usec + 999) / 1000;
msleep_interruptible(msec);
}
}
/**
* ice_cgu_poll - Poll the specified CGU register for the specified value
* @pf: Board private structure
* @offset: Offset of the register
* @mask: Bitmask for testing the value
* @value: Value to poll for
* @delay_time: Delay between the register reads
* @delay_loops: Number of read loops
*/
int ice_cgu_poll(struct ice_pf *pf, u64 offset, u32 mask, u32 value, u32 delay_time,
u32 delay_loops)
{
int status;
u32 reg, i;
for (i = 0; i < delay_loops; i++) {
status = ice_cgu_reg_read(pf, offset, &reg);
if (status)
goto err;
if ((reg & mask) == value)
return 0;
/* delay for a bit */
ice_cgu_usleep(delay_time);
}
return -EBUSY;
err:
return status;
}
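/*
 * Usage sketch (illustrative only): wait up to ~1 ms (10 reads, 100 us
 * apart) for a hypothetical lock bit in NAC_CGU_DWORD10 to assert. The
 * BIT(0) mask is made up for the example; real callers poll their own
 * status bits.
 */
static int ice_cgu_wait_lock_example(struct ice_pf *pf)
{
	return ice_cgu_poll(pf, NAC_CGU_DWORD10, BIT(0), BIT(0), 100, 10);
}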
/**
* ice_cgu_npoll - Poll the specified CGU register for the specified value occurring n times
* @pf: Board private structure
* @offset: Offset of the register
* @mask: Bitmask for testing the value
* @value: Value to poll for
* @delay_time: Delay between the register reads
* @delay_loops: Number of read loops
* @poll_count: Number of the value matches to poll for
* @count_delay_time: Additional delay after the value match
*/
int ice_cgu_npoll(struct ice_pf *pf, u32 offset, u32 mask, u32 value, u32 delay_time,
u32 delay_loops, u32 poll_count, u32 count_delay_time)
{
u32 reg, i, my_count = 0, complete = 0;
int status;
for (i = 0; i < delay_loops; i++) {
status = ice_cgu_reg_read(pf, offset, &reg);
if (status)
goto err;
dev_dbg(ice_pf_to_dev(pf), "count=%u, reg=%08x\n", my_count, reg);
if ((reg & mask) == value) {
my_count++;
if (my_count < poll_count) {
ice_cgu_usleep(count_delay_time);
} else {
complete = 1;
break;
}
} else {
my_count = 0;
ice_cgu_usleep(delay_time);
}
}
if (complete)
return 0;
else
return -EBUSY;
err:
return status;
}
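/*
 * Usage sketch (illustrative only): require five consecutive matches of a
 * hypothetical "locked" bit, re-arming the counter on any mismatch -- the
 * debounce behavior that distinguishes ice_cgu_npoll() from ice_cgu_poll().
 * Mask, delays and counts below are made up.
 */
static int ice_cgu_debounce_lock_example(struct ice_pf *pf)
{
	return ice_cgu_npoll(pf, NAC_CGU_DWORD10, BIT(0), BIT(0),
			     100 /* retry delay */, 1000 /* max reads */,
			     5 /* consecutive matches */,
			     50 /* delay after each match */);
}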
struct ice_cgu_dpll_params dpll_params_table[ICE_NUM_DPLL_PARAMS] = {
/* {dpll select, sample rate, mul_rat_m1, scale, gain} */
{ ICE_CGU_DPLL_SELECT_TRANSPORT, ICE_CGU_SAMPLE_RATE_8K, 3124, 16, 42 },
{ ICE_CGU_DPLL_SELECT_EEC_RELAXED_BW, ICE_CGU_SAMPLE_RATE_8K, 3124, 7, 3 },
{ ICE_CGU_DPLL_SELECT_TRANSPORT, ICE_CGU_SAMPLE_RATE_10K, 2499, 20, 66 },
{ ICE_CGU_DPLL_SELECT_EEC_RELAXED_BW, ICE_CGU_SAMPLE_RATE_10K, 2499, 8, 4 },
{ ICE_CGU_DPLL_SELECT_TRANSPORT, ICE_CGU_SAMPLE_RATE_12K5, 1999, 25, 103 },
{ ICE_CGU_DPLL_SELECT_EEC_RELAXED_BW, ICE_CGU_SAMPLE_RATE_12K5, 1999, 10, 6 }
};
struct ice_cgu_dpll_per_rate_params dpll_per_rate_params[NUM_ICE_TIME_REF_FREQ] = {
/* {rate_hz, sample_rate, div_rat_m1, synce_rat_sel} */
{ 25000000, ICE_CGU_SAMPLE_RATE_10K, 2499, 0 }, /* 25 MHz */
{ 122880000, ICE_CGU_SAMPLE_RATE_8K, 3071, 1 }, /* 122.88 MHz */
{ 125000000, ICE_CGU_SAMPLE_RATE_10K, 2499, 1 }, /* 125 MHz */
{ 153600000, ICE_CGU_SAMPLE_RATE_10K, 3071, 1 }, /* 153.6 MHz */
{ 156250000, ICE_CGU_SAMPLE_RATE_10K, 3124, 1 }, /* 156.25 MHz */
};
struct ice_cgu_lcpll_per_rate_params tspll_per_rate_params[NUM_ICE_TIME_REF_FREQ] = {
/* {refclk_pre_div, feedback_div, frac_n_div, post_pll_div} */
{ 1, 197, 2621440, 6 }, /* 25 MHz */
{ 5, 223, 524288, 7 }, /* 122.88 MHz */
{ 5, 223, 524288, 7 }, /* 125 MHz */
{ 5, 159, 1572864, 6 }, /* 153.6 MHz */
{ 5, 159, 1572864, 6 }, /* 156.25 MHz */
{ 10, 223, 524288, 7 }, /* 245.76 MHz */
};
struct ice_cgu_lcpll_per_rate_params japll_per_rate_params[NUM_ICE_CGU_JAPLL_REF_FREQ] = {
/* {refclk_pre_div, feedback_div, frac_n_div, post_pll_div} */
{ 1, 150, 0, 6 }, /* 25 MHz */
{ 1, 120, 0, 6 }, /* 156.25 MHz */
};
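/*
 * Lookup sketch (illustrative only): dpll_params_table is keyed by the
 * (dpll select, sample rate) pair per the inline comment above. The field
 * names used here follow that comment and are assumptions, not taken from
 * the original header.
 */
static const struct ice_cgu_dpll_params *
ice_cgu_find_dpll_params_example(int dpll_select, int sample_rate)
{
	int i;

	for (i = 0; i < ICE_NUM_DPLL_PARAMS; i++)
		if (dpll_params_table[i].dpll_select == dpll_select &&
		    dpll_params_table[i].sample_rate == sample_rate)
			return &dpll_params_table[i];

	return NULL;
}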

drivers/thirdparty/ice/ice_cgu_util.h (deleted)

@@ -1,46 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_CGU_UTIL_H_
#define _ICE_CGU_UTIL_H_
/* offset of last valid CGU register */
#define ICE_CGU_MAX_REG_OFFS 0x47c
int ice_cgu_reg_read(struct ice_pf *pf, u32 reg, u32 *val);
int ice_cgu_reg_write(struct ice_pf *pf, u32 reg, u32 val);
int ice_cgu_set_gnd(struct ice_pf *pf, bool enable);
int ice_cgu_set_byp(struct ice_pf *pf, bool enable);
int ice_cgu_set_holdover_lock_irq(struct ice_pf *pf, bool enable);
int ice_cgu_mux_sel_set_reg(struct ice_pf *pf, enum ice_cgu_mux_sel mux_sel, u32 val);
int ice_cgu_dck_rst_assert_release(struct ice_pf *pf, bool assert);
int ice_cgu_dck2_rst_assert_release(struct ice_pf *pf, bool assert);
int ice_cgu_mck_rst_assert_release(struct ice_pf *pf, bool assert);
void ice_cgu_usleep(u64 usec);
int ice_cgu_poll(struct ice_pf *pf, u64 offset, u32 mask, u32 value, u32 delay_time,
u32 delay_loops);
int ice_cgu_npoll(struct ice_pf *pf, u32 offset, u32 mask, u32 value, u32 delay_time,
u32 delay_loops, u32 poll_count, u32 count_delay_time);
#define ICE_NUM_DPLL_PARAMS (NUM_ICE_CGU_SAMPLE_RATE * NUM_ICE_CGU_DPLL_SELECT)
extern struct ice_cgu_dpll_params dpll_params_table[ICE_NUM_DPLL_PARAMS];
extern struct ice_cgu_dpll_per_rate_params dpll_per_rate_params[NUM_ICE_TIME_REF_FREQ];
extern struct ice_cgu_lcpll_per_rate_params tspll_per_rate_params[NUM_ICE_TIME_REF_FREQ];
extern struct ice_cgu_lcpll_per_rate_params japll_per_rate_params[NUM_ICE_CGU_JAPLL_REF_FREQ];
#endif /* _ICE_CGU_UTIL_H_ */

File diff suppressed because it is too large

drivers/thirdparty/ice/ice_common.h
@@ -4,10 +4,10 @@
 #ifndef _ICE_COMMON_H_
 #define _ICE_COMMON_H_
+#include "ice.h"
 #include "ice_type.h"
 #include "ice_nvm.h"
 #include "ice_flex_pipe.h"
+#include "ice_parser.h"
 #include "virtchnl.h"
 #include "ice_switch.h"
 #include "ice_fdir.h"
@@ -22,81 +22,84 @@ enum ice_fw_modes {
 	ICE_FW_MODE_ROLLBACK
 };
+int ice_init_fltr_mgmt_struct(struct ice_hw *hw);
+void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw);
 void ice_set_umac_shared(struct ice_hw *hw);
-enum ice_status ice_init_hw(struct ice_hw *hw);
+int ice_init_hw(struct ice_hw *hw);
 void ice_deinit_hw(struct ice_hw *hw);
-enum ice_status ice_check_reset(struct ice_hw *hw);
+int ice_check_reset(struct ice_hw *hw);
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req);
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
+int ice_create_all_ctrlq(struct ice_hw *hw);
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
+int ice_init_all_ctrlq(struct ice_hw *hw);
-void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading);
 void ice_destroy_all_ctrlq(struct ice_hw *hw);
-enum ice_status
+int
 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 		  struct ice_rq_event_info *e, u16 *pending);
-enum ice_status
+int
 ice_get_link_status(struct ice_port_info *pi, bool *link_up);
-enum ice_status ice_update_link_info(struct ice_port_info *pi);
+int ice_update_link_info(struct ice_port_info *pi);
-enum ice_status
+int
 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
 		enum ice_aq_res_access_type access, u32 timeout);
 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
-enum ice_status
+int
 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
-enum ice_status
+int
 ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
-enum ice_status
+int
 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
 		       struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 		struct ice_aq_desc *desc, void *buf, u16 buf_size,
 		struct ice_sq_cd *cd);
 void ice_clear_pxe_mode(struct ice_hw *hw);
-enum ice_status ice_get_caps(struct ice_hw *hw);
+int ice_get_caps(struct ice_hw *hw);
 void ice_set_safe_mode_caps(struct ice_hw *hw);
+int
+ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
+			 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
+			 u16 *ret_next_table, u32 *ret_next_index,
+			 struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
 		  u32 rxq_index);
-enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index);
+int ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index);
-enum ice_status
+int
 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index);
-enum ice_status
+int
 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
 			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
 			 u32 tx_cmpltnq_index);
-enum ice_status
+int
 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index);
-enum ice_status
+int
 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
 			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
 			  u32 tx_drbell_q_index);
-enum ice_status
+int
 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
-enum ice_status
+int
 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params);
-enum ice_status
+int
 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
 		   struct ice_aqc_get_set_rss_keys *keys);
-enum ice_status
+int
 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
 		   struct ice_aqc_get_set_rss_keys *keys);
-enum ice_status
+int
 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
 			  bool is_tc_change, bool subseq_call, bool flush_pipe,
 			  u8 timeout, u32 *blocked_cgds,
@@ -104,59 +107,70 @@ ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
 			  u8 *txqs_moved, struct ice_sq_cd *cd);
 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
 void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
 extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-enum ice_status
+int
 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
 	    const struct ice_ctx_ele *ce_info);
 extern struct mutex ice_global_cfg_lock_sw;
-enum ice_status
+int
 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
 		void *buf, u16 buf_size, struct ice_sq_cd *cd);
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
 		       struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
 		       bool save_bad_pac, bool pad_short_pac, bool double_vlan,
 		       struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
 		    struct ice_aqc_get_phy_caps_data *caps,
 		    struct ice_sq_cd *cd);
-enum ice_status
+int
+ice_aq_get_netlist_node_pin(struct ice_hw *hw,
+			    struct ice_aqc_get_link_topo_pin *cmd,
+			    u16 *node_handle);
+int
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+			u8 *node_part_number, u16 *node_handle);
+int
+ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
+		      u16 *node_handle);
+int
 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
 		 enum ice_adminq_opc opc, struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps);
 void
 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
 		    u16 link_speeds_bitmap);
-enum ice_status
+int
 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
 			struct ice_sq_cd *cd);
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
+int ice_clear_pf_cfg(struct ice_hw *hw);
-enum ice_status
+int
 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
 bool ice_fw_supports_link_override(struct ice_hw *hw);
-enum ice_status
+bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw);
+int
 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
 			      struct ice_port_info *pi);
 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
-enum ice_status
+int
 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
 	   bool ena_auto_link_update);
-enum ice_status
+int
 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
 	       enum ice_fc_mode req_mode);
 bool
@@ -166,85 +180,130 @@ void
 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
 			 struct ice_aqc_get_phy_caps_data *caps,
 			 struct ice_aqc_set_phy_cfg_data *cfg);
-enum ice_status
+int
 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
 		enum ice_fec_mode fec);
-enum ice_status
+int
 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
 			   struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
-enum ice_status
+int
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
+		   struct ice_sq_cd *cd);
+int
 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
 		     struct ice_link_status *link, struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
 		      struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
 		       struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
 		  bool write, struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
 			 struct ice_aqc_link_topo_params *topo_params,
 			 struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
 			 struct ice_aqc_link_topo_params *topo_params,
 			 u32 start_address, u8 *buf, u8 buf_size,
 			 struct ice_sq_cd *cd);
 void ice_dump_port_info(struct ice_port_info *pi);
 void ice_dump_caps(struct ice_hw *hw);
 void ice_dump_ptp_dev_caps(struct ice_hw *hw);
 void ice_dump_ptp_func_caps(struct ice_hw *hw);
-enum ice_status ice_dump_port_dflt_topo(struct ice_port_info *pi);
+int ice_dump_port_dflt_topo(struct ice_port_info *pi);
 void ice_dump_port_topo(struct ice_port_info *pi);
-enum ice_status
+int
 ice_aq_get_port_options(struct ice_hw *hw,
 			struct ice_aqc_get_port_options_elem *options,
 			u8 *option_count, u8 lport, bool lport_valid,
 			u8 *active_option_idx, bool *active_option_valid);
-enum ice_status
+int
 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
 		 u16 *max_rdmaqs);
-enum ice_status
+int
 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid);
-enum ice_status
+int
 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
 		      u16 *q_id);
-enum ice_status
+int
 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
 		u16 *q_handle, u16 *q_ids, u32 *q_teids,
 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
 		struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
 		u16 *max_lanqs);
-enum ice_status
+int
 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
 		struct ice_sq_cd *cd);
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+int
+ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw);
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
 void ice_replay_post(struct ice_hw *hw);
 struct ice_q_ctx *
 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
-enum ice_status ice_sbq_rw_reg_lp(struct ice_hw *hw,
+int ice_sbq_rw_reg_lp(struct ice_hw *hw,
 		      struct ice_sbq_msg_input *in, bool lock);
 void ice_sbq_lock(struct ice_hw *hw);
 void ice_sbq_unlock(struct ice_hw *hw);
-enum ice_status ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
+int
+ice_aq_cfg_cgu_err(struct ice_hw *hw, bool ena_event_report, bool ena_err_report,
+		   struct ice_sq_cd *cd);
+int
+ice_aq_get_cgu_abilities(struct ice_hw *hw,
+			 struct ice_aqc_get_cgu_abilities *abilities);
+int
+ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
+			 u32 freq, s32 phase_delay);
+int
+ice_aq_get_input_pin_cfg(struct ice_hw *hw,
+			 struct ice_aqc_get_cgu_input_config *cfg,
+			 u8 input_idx);
+int
+ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
+			  u8 src_sel, u32 freq, s32 phase_delay);
+int
+ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
+			  u8 *src_sel, u32 *freq, u32 *src_freq);
+int
+ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
+			   u16 *dpll_state, s64 *phase_offset, u8 *eec_mode);
+int
+ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
+			   u8 config, u8 eec_mode);
+int
+ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+			u8 ref_priority);
+int
+ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+			u8 *ref_prio);
+int
+ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
+		    u32 *cgu_fw_ver);
+int
+ice_aq_read_cgu_reg(struct ice_hw *hw, u16 offset, u8 data_len, u8 *data);
+int
+ice_aq_write_cgu_reg(struct ice_hw *hw, u16 offset, u8 data_len, u8 *data);
+int
+ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
+			   u32 *freq);
+int
+ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, u8 *port_num,
+			   u8 *flags, u32 *freq);
 void
 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
 		  u64 *prev_stat, u64 *cur_stat);
@@ -255,60 +314,44 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
 void ice_print_rollback_msg(struct ice_hw *hw);
 bool ice_is_generic_mac(struct ice_hw *hw);
 bool ice_is_e810(struct ice_hw *hw);
-enum ice_status
+bool ice_is_e810t(struct ice_hw *hw);
+bool ice_is_e823(struct ice_hw *hw);
+int
 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
 		     struct ice_aqc_txsched_elem_data *buf);
-enum ice_status
+int
 ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
 			u32 value, struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
 			u32 *value, struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
 		struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
 		bool *value, struct ice_sq_cd *cd);
-enum ice_status
+bool ice_is_100m_speed_supported(struct ice_hw *hw);
+int
 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
 		    struct ice_sq_cd *cd);
 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-enum ice_status
+int
 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
-enum ice_status
+int ice_lldp_execute_pending_mib(struct ice_hw *hw);
+int
 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
 		u16 bus_addr, __le16 addr, u8 params, u8 *data,
 		struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
 		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
 		 struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
 				struct ice_sq_cd *cd);
 bool ice_is_fw_health_report_supported(struct ice_hw *hw);
 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
+/* AQ API version for FW auto drop reports */
+bool ice_is_fw_auto_drop_supported(struct ice_hw *hw);
-/* E810T PCA9575 IO controller registers */
-#define ICE_PCA9575_P0_IN	0x0
-#define ICE_PCA9575_P1_IN	0x1
-#define ICE_PCA9575_P0_CFG	0x8
-#define ICE_PCA9575_P1_CFG	0x9
-#define ICE_PCA9575_P0_OUT	0xA
-#define ICE_PCA9575_P1_OUT	0xB
-/* E810T PCA9575 IO controller pin control */
-#define ICE_E810T_P0_GNSS_PRSNT_N	BIT(4)
-#define ICE_E810T_P1_SMA1_DIR_EN	BIT(4)
-#define ICE_E810T_P1_SMA1_TX_EN		BIT(5)
-#define ICE_E810T_P1_SMA2_UFL2_RX_DIS	BIT(3)
-#define ICE_E810T_P1_SMA2_DIR_EN	BIT(6)
-#define ICE_E810T_P1_SMA2_TX_EN		BIT(7)
-enum ice_status
-ice_read_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
-enum ice_status
-ice_write_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 data);
-bool ice_e810t_is_pca9575_present(struct ice_hw *hw);
 #endif /* _ICE_COMMON_H_ */
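The conversion in this header repeats mechanically across the files below: every `enum ice_status` return becomes `int`, and the driver-private ICE_ERR_* codes give way to standard errnos at the call sites. A minimal caller sketch under that assumption (function names taken from the prototypes above; the wrapper itself is hypothetical):

static int ice_bringup_example(struct ice_hw *hw)
{
	int err;

	err = ice_init_hw(hw);	/* previously: enum ice_status + ICE_ERR_* */
	if (err)
		return err;	/* now a standard -errno such as -ENOMEM */

	err = ice_get_caps(hw);
	if (err)
		ice_deinit_hw(hw);

	return err;
}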

drivers/thirdparty/ice/ice_controlq.c

@@ -3,7 +3,6 @@
 #include "ice_common.h"
-
 #define ICE_CQ_INIT_REGS(qinfo, prefix)				\
 do {								\
 	(qinfo)->sq.head = prefix##_ATQH;			\
@@ -26,7 +25,6 @@ do {								\
 	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
 } while (0)
-
 /**
  * ice_adminq_init_regs - Initialize AdminQ registers
  * @hw: pointer to the hardware structure
@@ -40,7 +38,6 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
 	ICE_CQ_INIT_REGS(cq, PF_FW);
 }
-
 /**
  * ice_mailbox_init_regs - Initialize Mailbox registers
  * @hw: pointer to the hardware structure
@@ -90,7 +87,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  * @hw: pointer to the hardware structure
  * @cq: pointer to the specific Control queue
  */
-static enum ice_status
+static int
 ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
 	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
@@ -99,7 +96,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 					    &cq->sq.desc_buf.pa,
 					    GFP_KERNEL | __GFP_ZERO);
 	if (!cq->sq.desc_buf.va)
-		return ICE_ERR_NO_MEMORY;
+		return -ENOMEM;
 	cq->sq.desc_buf.size = size;

 	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
@@ -110,7 +107,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 		cq->sq.desc_buf.va = NULL;
 		cq->sq.desc_buf.pa = 0;
 		cq->sq.desc_buf.size = 0;
-		return ICE_ERR_NO_MEMORY;
+		return -ENOMEM;
 	}

 	return 0;
@@ -121,7 +118,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  * @hw: pointer to the hardware structure
  * @cq: pointer to the specific Control queue
  */
-static enum ice_status
+static int
 ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
 	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
@@ -130,7 +127,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 					    &cq->rq.desc_buf.pa,
 					    GFP_KERNEL | __GFP_ZERO);
 	if (!cq->rq.desc_buf.va)
-		return ICE_ERR_NO_MEMORY;
+		return -ENOMEM;
 	cq->rq.desc_buf.size = size;
 	return 0;
 }
@@ -157,7 +154,7 @@ static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
  * @hw: pointer to the hardware structure
  * @cq: pointer to the specific Control queue
  */
-static enum ice_status
+static int
 ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
 	int i;
@@ -168,7 +165,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
 				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
 	if (!cq->rq.dma_head)
-		return ICE_ERR_NO_MEMORY;
+		return -ENOMEM;
 	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

 	/* allocate the mapped buffers */
@@ -221,7 +218,7 @@ unwind_alloc_rq_bufs:
 	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
 	cq->rq.dma_head = NULL;

-	return ICE_ERR_NO_MEMORY;
+	return -ENOMEM;
 }

 /**
@@ -229,7 +226,7 @@ unwind_alloc_rq_bufs:
  * @hw: pointer to the hardware structure
  * @cq: pointer to the specific Control queue
  */
-static enum ice_status
+static int
 ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
 	int i;
@@ -238,7 +235,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
 				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
 	if (!cq->sq.dma_head)
-		return ICE_ERR_NO_MEMORY;
+		return -ENOMEM;
 	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

 	/* allocate the mapped buffers */
@@ -269,10 +266,10 @@ unwind_alloc_sq_bufs:
 	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
 	cq->sq.dma_head = NULL;

-	return ICE_ERR_NO_MEMORY;
+	return -ENOMEM;
 }

-static enum ice_status
+static int
 ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
 {
 	/* Clear Head and Tail */
@@ -286,7 +283,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
 	/* Check one register to verify that config was applied */
 	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
-		return ICE_ERR_AQ_ERROR;
+		return -EIO;

 	return 0;
 }
@@ -298,7 +295,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
  *
  * Configure base address and length registers for the transmit queue
  */
-static enum ice_status
+static int
 ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
 	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
@@ -311,10 +308,10 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  *
  * Configure base address and length registers for the receive (event queue)
  */
-static enum ice_status
+static int
 ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
-	enum ice_status status;
+	int status;

 	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
 	if (status)
@@ -364,19 +361,19 @@ do {								\
  * Do *NOT* hold the lock when calling this as the memory allocation routines
  * called are not going to be atomic context safe
  */
-static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
-	enum ice_status ret_code;
+	int ret_code;

 	if (cq->sq.count > 0) {
 		/* queue already initialized */
-		ret_code = ICE_ERR_NOT_READY;
+		ret_code = -EBUSY;
 		goto init_ctrlq_exit;
 	}

 	/* verify input for valid configuration */
 	if (!cq->num_sq_entries || !cq->sq_buf_size) {
-		ret_code = ICE_ERR_CFG;
+		ret_code = -EIO;
 		goto init_ctrlq_exit;
 	}
@@ -424,19 +421,19 @@ init_ctrlq_exit:
  * Do *NOT* hold the lock when calling this as the memory allocation routines
  * called are not going to be atomic context safe
  */
-static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
-	enum ice_status ret_code;
+	int ret_code;

 	if (cq->rq.count > 0) {
 		/* queue already initialized */
-		ret_code = ICE_ERR_NOT_READY;
+		ret_code = -EBUSY;
 		goto init_ctrlq_exit;
 	}

 	/* verify input for valid configuration */
 	if (!cq->num_rq_entries || !cq->rq_buf_size) {
-		ret_code = ICE_ERR_CFG;
+		ret_code = -EIO;
 		goto init_ctrlq_exit;
 	}
@@ -477,15 +474,15 @@ init_ctrlq_exit:
  *
  * The main shutdown routine for the Control Transmit Queue
  */
-static enum ice_status
+static int
 ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
-	enum ice_status ret_code = 0;
+	int ret_code = 0;

 	mutex_lock(&cq->sq_lock);

 	if (!cq->sq.count) {
-		ret_code = ICE_ERR_NOT_READY;
+		ret_code = -EBUSY;
 		goto shutdown_sq_out;
 	}
@@ -525,14 +522,20 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
 	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
 		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
 			dev_info(ice_hw_to_dev(hw),
-				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+				 "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
+				 hw->api_maj_ver, hw->api_min_ver,
+				 EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
 		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
 			dev_info(ice_hw_to_dev(hw),
-				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+				 "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+				 hw->api_maj_ver, hw->api_min_ver,
+				 EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
 	} else {
 		/* Major API version is older than expected, log a warning */
 		dev_info(ice_hw_to_dev(hw),
-			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+			 "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+			 hw->api_maj_ver, hw->api_min_ver,
+			 EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
 	}
 	return true;
 }
@@ -544,15 +547,15 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
  *
  * The main shutdown routine for the Control Receive Queue
  */
-static enum ice_status
+static int
 ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
-	enum ice_status ret_code = 0;
+	int ret_code = 0;

 	mutex_lock(&cq->rq_lock);

 	if (!cq->rq.count) {
-		ret_code = ICE_ERR_NOT_READY;
+		ret_code = -EBUSY;
 		goto shutdown_rq_out;
 	}
@@ -575,23 +578,21 @@ shutdown_rq_out:
 	return ret_code;
 }
-
-
 /**
  * ice_init_check_adminq - Check version for Admin Queue to know if its alive
  * @hw: pointer to the hardware structure
  */
-static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
+static int ice_init_check_adminq(struct ice_hw *hw)
 {
 	struct ice_ctl_q_info *cq = &hw->adminq;
-	enum ice_status status;
+	int status;

 	status = ice_aq_get_fw_ver(hw, NULL);
 	if (status)
 		goto init_ctrlq_free_rq;

 	if (!ice_aq_ver_check(hw)) {
-		status = ICE_ERR_FW_API_VER;
+		status = -EIO;
 		goto init_ctrlq_free_rq;
 	}
@@ -617,10 +618,10 @@ init_ctrlq_free_rq:
  *
  * NOTE: this function does not initialize the controlq locks
  */
-static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 {
 	struct ice_ctl_q_info *cq;
-	enum ice_status ret_code;
+	int ret_code;

 	switch (q_type) {
 	case ICE_CTL_Q_ADMIN:
@@ -636,14 +637,14 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 		cq = &hw->mailboxq;
 		break;
 	default:
-		return ICE_ERR_PARAM;
+		return -EINVAL;
 	}
 	cq->qtype = q_type;

 	/* verify input for valid configuration */
 	if (!cq->num_rq_entries || !cq->num_sq_entries ||
 	    !cq->rq_buf_size || !cq->sq_buf_size) {
-		return ICE_ERR_CFG;
+		return -EIO;
 	}

 	/* setup SQ command write back timeout */
@@ -683,10 +684,12 @@ static bool ice_is_sbq_supported(struct ice_hw *hw)
  * ice_shutdown_ctrlq - shutdown routine for any control queue
  * @hw: pointer to the hardware structure
  * @q_type: specific Control queue type
+ * @unloading: is the driver unloading itself
  *
  * NOTE: this function does not destroy the control queue locks.
  */
-static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
+			       bool unloading)
 {
 	struct ice_ctl_q_info *cq;
@@ -694,7 +697,7 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 	case ICE_CTL_Q_ADMIN:
 		cq = &hw->adminq;
 		if (ice_check_sq_alive(hw, cq))
-			ice_aq_q_shutdown(hw, true);
+			ice_aq_q_shutdown(hw, unloading);
 		break;
 	case ICE_CTL_Q_SB:
 		cq = &hw->sbq;
@@ -713,20 +716,21 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 /**
  * ice_shutdown_all_ctrlq - shutdown routine for all control queues
  * @hw: pointer to the hardware structure
+ * @unloading: is the driver unloading itself
  *
  * NOTE: this function does not destroy the control queue locks. The driver
  * may call this at runtime to shutdown and later restart control queues, such
  * as in response to a reset event.
  */
-void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
 {
 	/* Shutdown FW admin queue */
-	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
 	/* Shutdown PHY Sideband */
 	if (ice_is_sbq_supported(hw))
-		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
+		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
 	/* Shutdown PF-VF Mailbox */
-	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
 }

 /**
@@ -742,10 +746,10 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
  *
  * NOTE: this function does not initialize the controlq locks.
  */
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
+int ice_init_all_ctrlq(struct ice_hw *hw)
 {
-	enum ice_status status;
 	u32 retry = 0;
+	int status;

 	/* Init FW admin queue */
 	do {
@@ -754,11 +758,11 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
 			return status;

 		status = ice_init_check_adminq(hw);
-		if (status != ICE_ERR_AQ_FW_CRITICAL)
+		if (status != -EIO)
 			break;

 		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
-		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
 		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
 	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
@@ -805,7 +809,7 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
  * driver needs to re-initialize control queues at run time it should call
  * ice_init_all_ctrlq instead.
  */
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+int ice_create_all_ctrlq(struct ice_hw *hw)
 {
 	ice_init_ctrlq_locks(&hw->adminq);
 	if (ice_is_sbq_supported(hw))
@@ -839,7 +843,7 @@ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
 void ice_destroy_all_ctrlq(struct ice_hw *hw)
 {
 	/* shut down all the control queues first */
-	ice_shutdown_all_ctrlq(hw);
+	ice_shutdown_all_ctrlq(hw, true);

 	ice_destroy_ctrlq_locks(&hw->adminq);
 	if (ice_is_sbq_supported(hw))
@@ -956,7 +960,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  * This is the main send command routine for the ATQ. It runs the queue,
  * cleans the queue, etc.
  */
-enum ice_status
+int
 ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
 		       struct ice_sq_cd *cd)
@@ -964,26 +968,26 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 	struct ice_dma_mem *dma_buf = NULL;
 	struct ice_aq_desc *desc_on_ring;
 	bool cmd_completed = false;
-	enum ice_status status = 0;
 	struct ice_sq_cd *details;
 	u32 total_delay = 0;
+	int status = 0;
 	u16 retval = 0;
 	u32 val = 0;

 	/* if reset is in progress return a soft error */
 	if (hw->reset_ongoing)
-		return ICE_ERR_RESET_ONGOING;
+		return -EBUSY;

 	cq->sq_last_status = ICE_AQ_RC_OK;

 	if (!cq->sq.count) {
 		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
-		status = ICE_ERR_AQ_EMPTY;
+		status = -EIO;
 		goto sq_send_command_error;
 	}

 	if ((buf && !buf_size) || (!buf && buf_size)) {
-		status = ICE_ERR_PARAM;
+		status = -EINVAL;
 		goto sq_send_command_error;
 	}
@@ -991,7 +995,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 	if (buf_size > cq->sq_buf_size) {
 		ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
 			  buf_size);
-		status = ICE_ERR_INVAL_SIZE;
+		status = -EINVAL;
 		goto sq_send_command_error;
 	}
@@ -1004,7 +1008,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 	if (val >= cq->num_sq_entries) {
 		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
 			  val);
-		status = ICE_ERR_AQ_EMPTY;
+		status = -EIO;
 		goto sq_send_command_error;
 	}
@@ -1021,7 +1025,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 	 */
 	if (ice_clean_sq(hw, cq) == 0) {
 		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
-		status = ICE_ERR_AQ_FULL;
+		status = -ENOSPC;
 		goto sq_send_command_error;
 	}
@@ -1075,7 +1079,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 			if (copy_size > buf_size) {
 				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
 					  copy_size, buf_size);
-				status = ICE_ERR_AQ_ERROR;
+				status = -EIO;
 			} else {
 				memcpy(buf, dma_buf->va, copy_size);
 			}
@@ -1091,7 +1095,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 		}
 		cmd_completed = true;
 		if (!status && retval != ICE_AQ_RC_OK)
-			status = ICE_ERR_AQ_ERROR;
+			status = -EIO;
 		cq->sq_last_status = (enum ice_aq_err)retval;
 	}
@@ -1109,10 +1113,10 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
 		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
 			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
-			status = ICE_ERR_AQ_FW_CRITICAL;
+			status = -EIO;
 		} else {
 			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
-			status = ICE_ERR_AQ_TIMEOUT;
+			status = -EIO;
 		}
 	}
@@ -1132,16 +1136,16 @@ sq_send_command_error:
  * This is the main send command routine for the ATQ. It runs the queue,
  * cleans the queue, etc.
  */
-enum ice_status
+int
 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 		struct ice_aq_desc *desc, void *buf, u16 buf_size,
 		struct ice_sq_cd *cd)
 {
-	enum ice_status status = 0;
+	int status = 0;

 	/* if reset is in progress return a soft error */
 	if (hw->reset_ongoing)
-		return ICE_ERR_RESET_ONGOING;
+		return -EBUSY;

 	mutex_lock(&cq->sq_lock);
 	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
@@ -1176,15 +1180,15 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
  * the contents through e. It can also return how many events are
  * left to process through 'pending'.
  */
-enum ice_status
+int
 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 		  struct ice_rq_event_info *e, u16 *pending)
 {
 	u16 ntc = cq->rq.next_to_clean;
 	enum ice_aq_err rq_last_status;
-	enum ice_status ret_code = 0;
 	struct ice_aq_desc *desc;
 	struct ice_dma_mem *bi;
+	int ret_code = 0;
 	u16 desc_idx;
 	u16 datalen;
 	u16 flags;
@@ -1198,7 +1202,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 	if (!cq->rq.count) {
 		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
-		ret_code = ICE_ERR_AQ_EMPTY;
+		ret_code = -EIO;
 		goto clean_rq_elem_err;
 	}
@@ -1207,7 +1211,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 	if (ntu == ntc) {
 		/* nothing to do - shouldn't need to update ring's values */
-		ret_code = ICE_ERR_AQ_NO_WORK;
+		ret_code = -EALREADY;
 		goto clean_rq_elem_out;
 	}
@@ -1218,7 +1222,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 	rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
 	flags = le16_to_cpu(desc->flags);
 	if (flags & ICE_AQ_FLAG_ERR) {
-		ret_code = ICE_ERR_AQ_ERROR;
+		ret_code = -EIO;
 		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
 			  le16_to_cpu(desc->opcode), rq_last_status);
 	}
@@ -1232,7 +1236,6 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
-
 	/* Restore the original datalen and buffer address in the desc,
 	 * FW updates datalen to indicate the event message size
 	 */
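An easy-to-miss behavioral change in this file: ice_shutdown_all_ctrlq() now takes an `unloading` flag that is forwarded to ice_aq_q_shutdown(), so firmware can tell a driver unload apart from a runtime reset that will re-initialize the queues. A hedged sketch of the two call patterns (the wrapper function names are illustrative, not from the diff):

/* runtime reset: keep "unloading" false so FW expects the queues back */
static void ice_example_rebuild_ctrlqs(struct ice_hw *hw)
{
	ice_shutdown_all_ctrlq(hw, false);
	if (ice_init_all_ctrlq(hw))
		ice_debug(hw, ICE_DBG_AQ_MSG, "ctrlq re-init failed\n");
}

/* driver unload: ice_destroy_all_ctrlq() passes unloading = true itself */
static void ice_example_remove(struct ice_hw *hw)
{
	ice_destroy_all_ctrlq(hw);
}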

drivers/thirdparty/ice/ice_controlq.h

@@ -6,7 +6,6 @@
 #include "ice_adminq_cmd.h"
-
 /* Maximum buffer lengths for all control queue types */
 #define ICE_AQ_MAX_BUF_LEN 4096
 #define ICE_MBXQ_MAX_BUF_LEN 4096

drivers/thirdparty/ice/ice_dcb.c

@ -18,19 +18,19 @@
* *
* Requests the complete LLDP MIB (entire packet). (0x0A00) * Requests the complete LLDP MIB (entire packet). (0x0A00)
*/ */
enum ice_status int
ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
u16 buf_size, u16 *local_len, u16 *remote_len, u16 buf_size, u16 *local_len, u16 *remote_len,
struct ice_sq_cd *cd) struct ice_sq_cd *cd)
{ {
struct ice_aqc_lldp_get_mib *cmd; struct ice_aqc_lldp_get_mib *cmd;
struct ice_aq_desc desc; struct ice_aq_desc desc;
enum ice_status status; int status;
cmd = &desc.params.lldp_get_mib; cmd = &desc.params.lldp_get_mib;
if (buf_size == 0 || !buf) if (buf_size == 0 || !buf)
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
@@ -60,7 +60,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
  * Enable or Disable posting of an event on ARQ when LLDP MIB
  * associated with the interface changes (0x0A01)
  */
-static enum ice_status
+static int
 ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
                            struct ice_sq_cd *cd)
 {
@@ -73,6 +73,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
     if (!ena_update)
         cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
+    else
+        cmd->command |= ICE_AQ_LLDP_MIB_PENDING_ENABLE <<
+                        ICE_AQ_LLDP_MIB_PENDING_S;
     return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 }
@@ -99,17 +102,17 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
  * Delete the specified TLV from LLDP Local MIB for the given bridge type.
  * The firmware places the entire LLDP MIB in the response buffer. (0x0A04)
  */
-enum ice_status
+int
 ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
                            void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len,
                            struct ice_sq_cd *cd)
 {
     struct ice_aqc_lldp_add_delete_tlv *cmd;
     struct ice_aq_desc desc;
-    enum ice_status status;
+    int status;
     if (tlv_len == 0)
-        return ICE_ERR_PARAM;
+        return -EINVAL;
     cmd = &desc.params.lldp_add_delete_tlv;
@@ -148,19 +151,19 @@ ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
  * Firmware will place the complete LLDP MIB in response buffer with the
  * updated TLV. (0x0A03)
  */
-enum ice_status
+int
 ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
                        u16 buf_size, u16 old_len, u16 new_len, u16 offset,
                        u16 *mib_len, struct ice_sq_cd *cd)
 {
     struct ice_aqc_lldp_update_tlv *cmd;
     struct ice_aq_desc desc;
-    enum ice_status status;
+    int status;
     cmd = &desc.params.lldp_update_tlv;
     if (offset == 0 || old_len == 0 || new_len == 0)
-        return ICE_ERR_PARAM;
+        return -EINVAL;
     ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_update_tlv);
@@ -190,7 +193,7 @@ ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
  *
  * Stop or Shutdown the embedded LLDP Agent (0x0A05)
  */
-enum ice_status
+int
 ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
                  struct ice_sq_cd *cd)
 {
@@ -218,7 +221,7 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
  *
  * Start the embedded LLDP Agent on all ports. (0x0A06)
  */
-enum ice_status
+int
 ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
 {
     struct ice_aqc_lldp_start *cmd;
@@ -699,18 +702,18 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
  *
  * Parse DCB configuration from the LLDPDU
  */
-static enum ice_status
+static int
 ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
 {
     struct ice_lldp_org_tlv *tlv;
-    enum ice_status ret = 0;
     u16 offset = 0;
+    int ret = 0;
     u16 typelen;
     u16 type;
     u16 len;
     if (!lldpmib || !dcbcfg)
-        return ICE_ERR_PARAM;
+        return -EINVAL;
     /* set to the start of LLDPDU */
     lldpmib += ETH_HLEN;
@@ -750,17 +753,17 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
  *
  * Query DCB configuration from the firmware
  */
-enum ice_status
+int
 ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
                    struct ice_dcbx_cfg *dcbcfg)
 {
-    enum ice_status ret;
     u8 *lldpmib;
+    int ret;
     /* Allocate the LLDPDU */
     lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
     if (!lldpmib)
-        return ICE_ERR_NO_MEMORY;
+        return -ENOMEM;
     ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
                               ICE_LLDPDU_SIZE, NULL, NULL, NULL);
@@ -785,13 +788,13 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
  * This sends out request/release to ignore PFC condition for a TC.
  * It will return the TCs for which PFC is currently ignored. (0x0301)
  */
-enum ice_status
+int
 ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
                       struct ice_sq_cd *cd)
 {
     struct ice_aqc_pfc_ignore *cmd;
     struct ice_aq_desc desc;
-    enum ice_status status;
+    int status;
     cmd = &desc.params.pfc_ignore;
@@ -821,17 +824,17 @@ ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
  * @cd: pointer to command details structure or NULL
  *
  * Start/Stop the embedded dcbx Agent. In case that this wrapper function
- * returns ICE_SUCCESS, caller will need to check if FW returns back the same
+ * returns 0, caller will need to check if FW returns back the same
  * value as stated in dcbx_agent_status, and react accordingly. (0x0A09)
  */
-enum ice_status
+int
 ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
                        bool *dcbx_agent_status, struct ice_sq_cd *cd)
 {
     struct ice_aqc_lldp_stop_start_specific_agent *cmd;
-    enum ice_status status;
+    enum ice_adminq_opc opcode;
     struct ice_aq_desc desc;
-    u16 opcode;
+    int status;
     cmd = &desc.params.lldp_agent_ctrl;
@@ -861,7 +864,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
  *
  * Get CEE DCBX mode operational configuration from firmware (0x0A07)
  */
-static enum ice_status
+static int
 ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
                        struct ice_aqc_get_cee_dcb_cfg_resp *buff,
                        struct ice_sq_cd *cd)
@@ -882,12 +885,12 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
  * This will return an indication if DSCP-based PFC or VLAN-based PFC
  * is enabled. (0x0302)
  */
-enum ice_status
+int
 ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd)
 {
     struct ice_aqc_set_query_pfc_mode *cmd;
     struct ice_aq_desc desc;
-    enum ice_status status;
+    int status;
     cmd = &desc.params.set_query_pfc_mode;
@@ -910,15 +913,15 @@ ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd)
  * This AQ call configures the PFC mode to DSCP-based PFC mode or VLAN
  * -based PFC (0x0303)
  */
-enum ice_status
+int
 ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
 {
     struct ice_aqc_set_query_pfc_mode *cmd;
     struct ice_aq_desc desc;
-    enum ice_status status;
+    int status;
     if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
-        return ICE_ERR_PARAM;
+        return -EINVAL;
     cmd = &desc.params.set_query_pfc_mode;
@@ -930,14 +933,13 @@ ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
     if (status)
         return status;
-    /* The spec isn't clear about whether the FW will return an error code
-     * if the PFC mode requested by the driver was not set. The spec just
-     * says that the FW will write the PFC mode set back into cmd->pfc_mode,
-     * so after the AQ has been executed, check if cmd->pfc_mode is what was
-     * requested.
+    /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is
+     * disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has
+     * been executed, check if cmd->pfc_mode is what was requested. If not,
+     * return an error.
      */
     if (cmd->pfc_mode != pfc_mode)
-        return ICE_ERR_NOT_SUPPORTED;
+        return -EOPNOTSUPP;
     return 0;
 }
@@ -951,7 +953,7 @@ ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
  * This AQ command will tell FW if it will apply or not apply the default DCB
  * configuration when link up (0x0306).
  */
-enum ice_status
+int
 ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
                           struct ice_sq_cd *cd)
 {
@@ -981,8 +983,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
                    struct ice_port_info *pi)
 {
     u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
-    u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
-    u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
+    u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j;
+    u8 i, err, sync, oper, app_index, ice_app_sel_type;
     u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
     u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
     struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
@@ -1079,8 +1081,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
      */
     if (!err && sync && oper) {
         dcbcfg->app[app_index].priority =
-            (app_prio & ice_aqc_cee_app_mask) >>
-            ice_aqc_cee_app_shift;
+            (u8)((app_prio & ice_aqc_cee_app_mask) >>
+                 ice_aqc_cee_app_shift);
         dcbcfg->app[app_index].selector = ice_app_sel_type;
         dcbcfg->app[app_index].prot_id = ice_app_prot_id_type;
         app_index++;
@@ -1097,14 +1099,14 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
  *
  * Get IEEE or CEE mode DCB configuration from the Firmware
  */
-static enum ice_status
+static int
 ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
 {
     struct ice_dcbx_cfg *dcbx_cfg = NULL;
-    enum ice_status ret;
+    int ret;
     if (!pi)
-        return ICE_ERR_PARAM;
+        return -EINVAL;
     if (dcbx_mode == ICE_DCBX_MODE_IEEE)
         dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
@@ -1137,14 +1139,14 @@ out:
  *
  * Get DCB configuration from the Firmware
  */
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+int ice_get_dcb_cfg(struct ice_port_info *pi)
 {
     struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg;
     struct ice_dcbx_cfg *dcbx_cfg;
-    enum ice_status ret;
+    int ret;
     if (!pi)
-        return ICE_ERR_PARAM;
+        return -EINVAL;
     ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
     if (!ret) {
@@ -1161,6 +1163,43 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
     return ret;
 }
+/**
+ * ice_get_dcb_cfg_from_mib_change
+ * @pi: port information structure
+ * @event: pointer to the admin queue receive event
+ *
+ * Set DCB configuration from received MIB Change event
+ */
+void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
+                                     struct ice_rq_event_info *event)
+{
+    struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
+    struct ice_aqc_lldp_get_mib *mib;
+    u8 change_type, dcbx_mode;
+    mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+    change_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+    if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
+        dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
+    dcbx_mode = ((mib->type & ICE_AQ_LLDP_DCBX_M) >>
+                 ICE_AQ_LLDP_DCBX_S);
+    switch (dcbx_mode) {
+    case ICE_AQ_LLDP_DCBX_IEEE:
+        dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+        ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg);
+        break;
+    case ICE_AQ_LLDP_DCBX_CEE:
+        pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
+        ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *)
+                           event->msg_buf, pi);
+        break;
+    }
+}
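The new helper above decodes two packed fields out of mib->type: the MIB type in the low bits and the DCBX mode behind ICE_AQ_LLDP_DCBX_S. A small standalone illustration of that mask-and-shift decode (the mask values below are made up for the example; the real ones live in ice_adminq_cmd.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative field layout: MIB type in bits 0-1, DCBX mode in
     * bits 6-7. The real masks/shifts come from ice_adminq_cmd.h.
     */
    #define LLDP_MIB_TYPE_M 0x3
    #define LLDP_DCBX_S     6
    #define LLDP_DCBX_M     (0x3 << LLDP_DCBX_S)

    int main(void)
    {
        uint8_t type = (uint8_t)((1 << LLDP_DCBX_S) | 0x1);

        printf("mib type  = %u\n", type & LLDP_MIB_TYPE_M);
        printf("dcbx mode = %u\n", (type & LLDP_DCBX_M) >> LLDP_DCBX_S);
        return 0;
    }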
 /**
  * ice_init_dcb
  * @hw: pointer to the HW struct
@@ -1168,13 +1207,13 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
  *
  * Update DCB configuration from the Firmware
  */
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
 {
     struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
-    enum ice_status ret = 0;
+    int ret = 0;
     if (!hw->func_caps.common_cap.dcb)
-        return ICE_ERR_NOT_SUPPORTED;
+        return -EOPNOTSUPP;
     qos_cfg->is_sw_lldp = true;
@@ -1190,7 +1229,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
             return ret;
         qos_cfg->is_sw_lldp = false;
     } else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
-        return ICE_ERR_NOT_READY;
+        return -EBUSY;
     }
     /* Configure the LLDP MIB change event */
@@ -1210,19 +1249,19 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
  *
  * Configure (disable/enable) MIB
  */
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
 {
     struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
-    enum ice_status ret;
+    int ret;
     if (!hw->func_caps.common_cap.dcb)
-        return ICE_ERR_NOT_SUPPORTED;
+        return -EOPNOTSUPP;
     /* Get DCBX status */
     qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
     if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
-        return ICE_ERR_NOT_READY;
+        return -EBUSY;
     ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
     if (!ret)
@@ -1570,7 +1609,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
     tlv->ouisubtype = htonl(ouisubtype);
     buf[0] = dcbcfg->pfc.pfccap & 0xF;
-    buf[1] = dcbcfg->pfc.pfcena & 0xF;
+    buf[1] = dcbcfg->pfc.pfcena;
 }
 /**
@@ -1663,16 +1702,16 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
  *
  * Set DCB configuration to the Firmware
  */
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+int ice_set_dcb_cfg(struct ice_port_info *pi)
 {
     u8 mib_type, *lldpmib = NULL;
     struct ice_dcbx_cfg *dcbcfg;
-    enum ice_status ret;
     struct ice_hw *hw;
     u16 miblen;
+    int ret;
     if (!pi)
-        return ICE_ERR_PARAM;
+        return -EINVAL;
     hw = pi->hw;
@@ -1681,7 +1720,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
     /* Allocate the LLDPDU */
     lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
     if (!lldpmib)
-        return ICE_ERR_NO_MEMORY;
+        return -ENOMEM;
     mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
     if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
@@ -1705,20 +1744,21 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
  *
  * query current port ETS configuration
  */
-static enum ice_status
+static int
 ice_aq_query_port_ets(struct ice_port_info *pi,
                       struct ice_aqc_port_ets_elem *buf, u16 buf_size,
                       struct ice_sq_cd *cd)
 {
     struct ice_aqc_query_port_ets *cmd;
     struct ice_aq_desc desc;
-    enum ice_status status;
+    int status;
     if (!pi)
-        return ICE_ERR_PARAM;
+        return -EINVAL;
     cmd = &desc.params.port_ets;
     ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
-    cmd->port_teid = pi->root->info.node_teid;
+    if (pi->root)
+        cmd->port_teid = pi->root->info.node_teid;
     status = ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd);
     return status;
@@ -1731,18 +1771,18 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
  *
  * update the SW DB with the new TC changes
  */
-static enum ice_status
+static int
 ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
                             struct ice_aqc_port_ets_elem *buf)
 {
     struct ice_sched_node *node, *tc_node;
     struct ice_aqc_txsched_elem_data elem;
-    enum ice_status status = 0;
     u32 teid1, teid2;
+    int status = 0;
     u8 i, j;
     if (!pi)
-        return ICE_ERR_PARAM;
+        return -EINVAL;
     /* suspend the missing TC nodes */
     for (i = 0; i < pi->root->num_children; i++) {
         teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid);
@@ -1799,12 +1839,12 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
  * query current port ETS configuration and update the
  * SW DB with the TC changes
  */
-enum ice_status
+int
 ice_query_port_ets(struct ice_port_info *pi,
                    struct ice_aqc_port_ets_elem *buf, u16 buf_size,
                    struct ice_sq_cd *cd)
 {
-    enum ice_status status;
+    int status;
     mutex_lock(&pi->sched_lock);
     status = ice_aq_query_port_ets(pi, buf, buf_size, cd);

drivers/thirdparty/ice/ice_dcb.h

@@ -6,6 +6,7 @@
 #include "ice_type.h"
 #include "ice_common.h"
+#include <scsi/iscsi_proto.h>
 #define ICE_DCBX_STATUS_NOT_STARTED 0
 #define ICE_DCBX_STATUS_IN_PROGRESS 1
@@ -144,51 +145,52 @@ struct ice_cee_app_prio {
     u8 prio_map;
 } __packed;
-enum ice_status
+int
 ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
                     u16 buf_size, u16 *local_len, u16 *remote_len,
                     struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
                            void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len,
                            struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
                        u16 buf_size, u16 old_len, u16 new_len, u16 offset,
                        u16 *mib_len, struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
                       struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
                           struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
                    struct ice_dcbx_cfg *dcbcfg);
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
-enum ice_status
+int ice_get_dcb_cfg(struct ice_port_info *pi);
+int ice_set_dcb_cfg(struct ice_port_info *pi);
+void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
+                                     struct ice_rq_event_info *event);
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
+int
 ice_query_port_ets(struct ice_port_info *pi,
                    struct ice_aqc_port_ets_elem *buf, u16 buf_size,
                    struct ice_sq_cd *cmd_details);
 #ifdef CONFIG_DCB
-enum ice_status
+int
 ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
                  struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
-enum ice_status
+int
 ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
                        bool *dcbx_agent_status, struct ice_sq_cd *cd);
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
 #else /* CONFIG_DCB */
-static inline enum ice_status
+static inline int
 ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
                  bool __always_unused shutdown_lldp_agent,
                  bool __always_unused persist,
@@ -197,7 +199,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
     return 0;
 }
-static inline enum ice_status
+static inline int
 ice_aq_start_lldp(struct ice_hw __always_unused *hw,
                   bool __always_unused persist,
                   struct ice_sq_cd __always_unused *cd)
@@ -205,7 +207,7 @@ ice_aq_start_lldp(struct ice_hw __always_unused *hw,
     return 0;
 }
-static inline enum ice_status
+static inline int
 ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
                        bool __always_unused start_dcbx_agent,
                        bool *dcbx_agent_status,
@@ -216,7 +218,7 @@ ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
     return 0;
 }
-static inline enum ice_status
+static inline int
 ice_cfg_lldp_mib_change(struct ice_hw __always_unused *hw,
                         bool __always_unused ena_mib)
 {

drivers/thirdparty/ice/ice_dcb_lib.c

@@ -182,15 +182,16 @@ void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
     switch (vsi->type) {
     case ICE_VSI_PF:
+    case ICE_VSI_VF:
         vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
         vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
         break;
 #ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
     case ICE_VSI_CHNL:
 #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
     case ICE_VSI_OFFLOAD_MACVLAN:
-#endif /* HAVE_NETDEV_SB_DEV */
+#endif /* HAVE_NDO_DFWD_OPS */
     case ICE_VSI_VMDQ2:
     case ICE_VSI_SWITCHDEV_CTRL:
         vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
@@ -222,8 +223,7 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
  */
 void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
 {
-    u16 qoffset;
-    u16 qcount;
+    u16 qoffset, qcount;
     int i, n;
     if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
@@ -252,7 +252,7 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
         vsi->rx_rings[i]->dcb_tc = n;
     }
-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
     /* when DCB is configured TC for MACVLAN queues should be
      * the first drop TC of the main VSI
      */
@@ -264,7 +264,7 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
         ice_for_each_alloc_rxq(vsi, i)
             vsi->rx_rings[i]->dcb_tc = first_droptc;
     }
-#endif /* HAVE_NETDEV_SB_DEV */
+#endif /* HAVE_NDO_DFWD_OPS */
 #ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
     if (vsi->type == ICE_VSI_PF) {
@@ -288,30 +288,6 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
 #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
 }
-/**
- * ice_peer_prep_tc_change - Pre-notify RDMA Peer in blocking call of TC change
- * @peer_obj_int: ptr to peer device internal struct
- * @data: ptr to opaque data
- */
-static int
-ice_peer_prep_tc_change(struct ice_peer_obj_int *peer_obj_int,
-                        void __always_unused *data)
-{
-    struct ice_peer_obj *peer_obj;
-    peer_obj = ice_get_peer_obj(peer_obj_int);
-    if (!ice_validate_peer_obj(peer_obj))
-        return 0;
-    if (!test_bit(ICE_PEER_OBJ_STATE_OPENED, peer_obj_int->state))
-        return 0;
-    if (peer_obj->peer_ops && peer_obj->peer_ops->prep_tc_change)
-        peer_obj->peer_ops->prep_tc_change(peer_obj);
-    return 0;
-}
 /**
  * ice_dcb_ena_dis_vsi - disable certain VSIs for DCB config/reconfig
  * @pf: pointer to the PF instance
@@ -352,7 +328,7 @@ static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
 /**
  * ice_dcb_bwchk - check if ETS bandwidth input parameters are correct
- * @pf: pointer to PF struct
+ * @pf: pointer to the PF struct
  * @dcbcfg: pointer to DCB config structure
  */
 int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg)
@@ -380,8 +356,7 @@ int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg)
     if (!total_bw) {
         etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
     } else if (total_bw != ICE_TC_MAX_BW) {
-        dev_err(ice_pf_to_dev(pf),
-                "Invalid config, total bandwidth must equal 100\n");
+        dev_err(ice_pf_to_dev(pf), "Invalid config, total bandwidth must equal 100\n");
         return -EINVAL;
     }
@@ -400,6 +375,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
     struct ice_dcbx_cfg *old_cfg, *curr_cfg;
     struct device *dev = ice_pf_to_dev(pf);
     int ret = ICE_DCB_NO_HW_CHG;
+    struct iidc_event *event;
     curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@@ -430,8 +406,16 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
         return -ENOMEM;
     dev_info(dev, "Commit DCB Configuration to the hardware\n");
-    /* Notify capable peers about impending change to TCs */
-    ice_for_each_peer(pf, NULL, ice_peer_prep_tc_change);
+    /* Notify capable aux drivers about impending change to TCs */
+    event = kzalloc(sizeof(*event), GFP_KERNEL);
+    if (!event) {
+        kfree(old_cfg);
+        return -ENOMEM;
+    }
+    set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
+    ice_send_event_to_auxs(pf, event);
+    kfree(event);
     /* avoid race conditions by holding the lock while disabling and
      * re-enabling the VSI
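The replacement above swaps the old peer-object callback for the IIDC event broadcast: allocate an event, set the type bit, send it to the auxiliary drivers, free it. A minimal standalone model of that allocate/notify/free shape (ice_send_event_to_auxs() and struct iidc_event are stubbed here, since they are driver-internal):

    #include <stdlib.h>

    #define EVENT_BEFORE_TC_CHANGE 0 /* stand-in for IIDC_EVENT_BEFORE_TC_CHANGE */

    struct iidc_event_stub {
        unsigned long type; /* bitmap of event types */
    };

    /* stub: the real ice_send_event_to_auxs() walks the aux device list */
    static void send_event_stub(struct iidc_event_stub *event)
    {
        (void)event;
    }

    static int notify_before_tc_change(void)
    {
        struct iidc_event_stub *event = calloc(1, sizeof(*event));

        if (!event)
            return -1; /* the driver returns -ENOMEM and unwinds */
        event->type |= 1UL << EVENT_BEFORE_TC_CHANGE;
        send_event_stub(event);
        free(event);
        return 0;
    }

    int main(void)
    {
        return notify_before_tc_change();
    }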
@@ -558,7 +542,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
     struct ice_aqc_port_ets_elem buf = { 0 };
     struct device *dev = ice_pf_to_dev(pf);
     struct ice_dcbx_cfg *err_cfg;
-    enum ice_status ret;
+    int ret;
     ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
     if (ret) {
@@ -738,7 +722,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
     /* Configure SW DCB default with ETS non-willing */
     ret = ice_dcb_sw_dflt_cfg(pf, false, true);
     if (ret) {
-        dev_err(dev, "Failed to set local DCB config %d\n", ret);
+        ice_dev_err_errno(dev, ret, "Failed to set local DCB config");
         return ret;
     }
@@ -762,10 +746,11 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
 void ice_pf_dcb_recfg(struct ice_pf *pf)
 {
     struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+    struct iidc_core_dev_info *cdev_info;
+    struct iidc_event *event;
     u8 tc_map = 0;
     int v, ret;
     /* Update each VSI */
     ice_for_each_vsi(pf, v) {
         struct ice_vsi *vsi = pf->vsi[v];
@@ -774,6 +759,8 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
             continue;
         if (vsi->type == ICE_VSI_PF) {
+            if (ice_dcb_get_num_tc(dcbcfg) > vsi->alloc_txq)
+                dev_warn(ice_pf_to_dev(vsi->back), "More TCs defined than queues/rings allocated.\n");
             tc_map = ice_dcb_get_ena_tc(dcbcfg);
             /* If DCBX request non-contiguous TC, then configure
@@ -783,11 +770,11 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
                 tc_map = ICE_DFLT_TRAFFIC_CLASS;
                 ice_dcb_noncontig_cfg(pf);
             }
-#if defined(HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO) && defined(HAVE_NETDEV_SB_DEV)
+#if defined(HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO) && defined(HAVE_NDO_DFWD_OPS)
         } else if (vsi->type == ICE_VSI_CHNL ||
                    vsi->type == ICE_VSI_OFFLOAD_MACVLAN) {
             tc_map = BIT(ice_get_first_droptc(vsi));
-# endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO && HAVE_NETDEV_SB_DEV */
+# endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO && HAVE_NDO_DFWD_OPS */
         } else {
             tc_map = ICE_DFLT_TRAFFIC_CLASS;
         }
@@ -798,20 +785,32 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
                     vsi->idx);
             continue;
         }
-        /* no need to proceed with remaining cfg if it is CHNL VSI */
-        if (vsi->type == ICE_VSI_CHNL)
+        /* no need to proceed with remaining cfg if it is CHNL
+         * or switchdev VSI
+         */
+        if (vsi->type == ICE_VSI_CHNL ||
+            vsi->type == ICE_VSI_SWITCHDEV_CTRL)
             continue;
         ice_vsi_map_rings_to_vectors(vsi);
         if (vsi->type == ICE_VSI_PF)
             ice_dcbnl_set_all(vsi);
     }
-    /* If the RDMA peer is registered, update that peer's initial_qos_info struct.
-     * The peer is closed during this process, so when it is opened, it will access
-     * the initial_qos_info element to configure itself.
-     */
-    if (pf->rdma_peer)
-        ice_setup_dcb_qos_info(pf, &pf->rdma_peer->initial_qos_info);
+    /* Notify the aux drivers that TC change is finished */
+    cdev_info = ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID);
+    if (cdev_info) {
+        ice_setup_dcb_qos_info(pf, &cdev_info->qos_info);
+        event = kzalloc(sizeof(*event), GFP_KERNEL);
+        if (!event)
+            return;
+        set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+        event->info.port_qos = cdev_info->qos_info;
+        ice_send_event_to_auxs(pf, event);
+        kfree(event);
+    }
 }
 /**
@@ -824,16 +823,13 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
     struct device *dev = ice_pf_to_dev(pf);
     struct ice_port_info *port_info;
     struct ice_hw *hw = &pf->hw;
-    enum ice_status status;
     int err;
     port_info = hw->port_info;
-    status = ice_init_dcb(hw, false);
-    if (status && !port_info->qos_cfg.is_sw_lldp) {
-        dev_err(dev, "Error initializing DCB %s\n",
-                ice_stat_str(status));
-        err = ice_status_to_errno(status);
+    err = ice_init_dcb(hw, false);
+    if (err && !port_info->qos_cfg.is_sw_lldp) {
+        dev_err(dev, "Error initializing DCB %d\n", err);
         goto dcb_init_err;
     }
@@ -848,12 +844,12 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
     err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC,
                               NULL);
     if (err)
-        dev_info(dev, "Fail to set VLAN PFC mode\n");
+        dev_info(dev, "Failed to set VLAN PFC mode\n");
     err = ice_dcb_sw_dflt_cfg(pf, true, locked);
     if (err) {
-        dev_err(dev, "Failed to set local DCB config %d\n",
-                err);
+        ice_dev_err_errno(dev, err,
+                          "Failed to set local DCB config");
         err = -EIO;
         goto dcb_init_err;
     }
@@ -883,7 +879,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
     if (err)
         goto dcb_init_err;
-    return err;
+    return 0;
 dcb_init_err:
     dev_err(dev, "DCB init failed\n");
@@ -969,19 +965,28 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
  * @pf: ptr to ice_pf
  * @qos_info: QoS param instance
  */
-void ice_setup_dcb_qos_info(struct ice_pf *pf, struct ice_qos_params *qos_info)
+void ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_qos_params *qos_info)
 {
+    struct iidc_core_dev_info *cdev_info;
     struct ice_dcbx_cfg *dcbx_cfg;
     unsigned int i;
     u32 up2tc;
+    if (!pf || !qos_info)
+        return;
+    cdev_info = ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID);
+    if (!cdev_info)
+        return;
     dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
     up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
     qos_info->num_apps = dcbx_cfg->numapps;
     qos_info->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
-    for (i = 0; i < ICE_IDC_MAX_USER_PRIORITY; i++)
+    for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
         qos_info->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
     for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
@@ -995,10 +1000,20 @@ void ice_setup_dcb_qos_info(struct ice_pf *pf, struct ice_qos_params *qos_info)
     }
     qos_info->pfc_mode = dcbx_cfg->pfc_mode;
-    for (i = 0; i < ICE_IDC_DSCP_NUM_VAL; i++)
+    for (i = 0; i < ICE_DSCP_NUM_VAL; i++)
         qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i];
 }
+/**
+ * ice_dcb_is_mib_change_pending - Check if MIB change is pending
+ * @state: MIB change state
+ */
+static bool ice_dcb_is_mib_change_pending(u8 state)
+{
+    return ICE_AQ_LLDP_MIB_CHANGE_PENDING ==
+        FIELD_GET(ICE_AQ_LLDP_MIB_CHANGE_STATE_M, state);
+}
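FIELD_GET(), used by the new helper above, comes from <linux/bitfield.h> and extracts a masked field while shifting it down to bit 0. A userspace approximation of the same semantics (the mask value here is illustrative, and __builtin_ctz is a GCC/Clang builtin):

    #include <stdint.h>
    #include <stdio.h>

    #define MIB_CHANGE_STATE_M 0x30 /* example: two-bit field at bits 4-5 */

    /* stand-in for FIELD_GET(): mask, then shift down by the mask's
     * trailing zero count
     */
    static unsigned int field_get(unsigned int mask, unsigned int reg)
    {
        return (reg & mask) >> __builtin_ctz(mask);
    }

    int main(void)
    {
        uint8_t state = 0x20;

        printf("pending state = %u\n", field_get(MIB_CHANGE_STATE_M, state));
        return 0;
    }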
 /**
  * ice_dcb_process_lldp_set_mib_change - Process MIB change
  * @pf: ptr to ice_pf
@@ -1010,11 +1025,14 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 {
     struct ice_aqc_port_ets_elem buf = { 0 };
     struct device *dev = ice_pf_to_dev(pf);
+    struct iidc_core_dev_info *cdev_info;
     struct ice_aqc_lldp_get_mib *mib;
     struct ice_dcbx_cfg tmp_dcbx_cfg;
+    bool pending_handled = true;
     bool need_reconfig = false;
     struct ice_port_info *pi;
     u8 mib_type;
+    u32 numtc;
     int ret;
     /* Not DCB capable or capability disabled */
@@ -1028,41 +1046,58 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
     pi = pf->hw.port_info;
     mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
     /* Ignore if event is not for Nearest Bridge */
-    mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
-                ICE_AQ_LLDP_BRID_TYPE_M);
+    mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type);
     dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type);
     if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
         return;
+    /* A pending change event contains accurate config information, and
+     * the FW setting has not been updated yet, so detect if change is
+     * pending to determine where to pull config information from
+     * (FW vs event)
+     */
+    if (ice_dcb_is_mib_change_pending(mib->state))
+        pending_handled = false;
     /* Check MIB Type and return if event for Remote MIB update */
-    mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+    mib_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
     dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local");
     if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {
         /* Update the remote cached instance and return */
-        ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
-                                 ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
-                                 &pi->qos_cfg.remote_dcbx_cfg);
-        if (ret) {
-            dev_err(dev, "Failed to get remote DCB config\n");
-            return;
+        if (!pending_handled) {
+            ice_get_dcb_cfg_from_mib_change(pi, event);
+        } else {
+            ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
                                      ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
                                      &pi->qos_cfg.remote_dcbx_cfg);
+            if (ret)
+                dev_dbg(dev, "Failed to get remote DCB config\n");
         }
+        return;
     }
+    /* That a DCB change has happened is now determined */
     mutex_lock(&pf->tc_mutex);
     /* store the old configuration */
-    tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+    tmp_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
     /* Reset the old DCBX configuration data */
     memset(&pi->qos_cfg.local_dcbx_cfg, 0,
            sizeof(pi->qos_cfg.local_dcbx_cfg));
     /* Get updated DCBX data from firmware */
-    ret = ice_get_dcb_cfg(pf->hw.port_info);
-    if (ret) {
-        dev_err(dev, "Failed to get DCB config\n");
-        goto out;
+    if (!pending_handled) {
+        ice_get_dcb_cfg_from_mib_change(pi, event);
+    } else {
+        ret = ice_get_dcb_cfg(pi);
+        if (ret) {
+            dev_err(dev, "Failed to get DCB config\n");
+            goto out;
+        }
     }
     /* No change detected in DCBX configs */
@@ -1081,7 +1116,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
         goto out;
     /* Enable DCB tagging only when more than one TC */
-    if (ice_dcb_get_num_tc(&pi->qos_cfg.local_dcbx_cfg) > 1) {
+    numtc = ice_dcb_get_num_tc(&pi->qos_cfg.local_dcbx_cfg);
+    if (numtc > 1) {
         dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
         set_bit(ICE_FLAG_DCB_ENA, pf->flags);
     } else {
@@ -1089,11 +1125,36 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
         clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
     }
+    if (numtc > pf->hw.func_caps.common_cap.maxtc)
+        dev_warn(dev, "%d TCs more than supported max of %d\n", numtc,
+                 pf->hw.func_caps.common_cap.maxtc);
+    cdev_info = ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID);
+    if (cdev_info) {
+        struct iidc_event *ievent;
+        /* can't fail the LAN flow based on a failure to notify
+         * the RDMA driver
+         */
+        ievent = kzalloc(sizeof(*ievent), GFP_KERNEL);
+        if (ievent) {
+            set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, ievent->type);
+            ice_send_event_to_auxs(pf, ievent);
+            kfree(ievent);
+        }
+    }
+    /* Send Execute Pending MIB Change event if it is a Pending event */
+    if (!pending_handled) {
+        ice_lldp_execute_pending_mib(&pf->hw);
+        pending_handled = true;
+    }
     rtnl_lock();
     /* disable VSIs affected by DCB changes */
     ice_dcb_ena_dis_vsi(pf, false, true);
-    ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+    ret = ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
     if (ret) {
         dev_err(dev, "Query Port ETS failed\n");
         goto unlock_rtnl;
@@ -1108,4 +1169,8 @@ unlock_rtnl:
     rtnl_unlock();
 out:
     mutex_unlock(&pf->tc_mutex);
+    /* Send Execute Pending MIB Change event if it is a Pending event */
+    if (!pending_handled)
+        ice_lldp_execute_pending_mib(&pf->hw);
 }

drivers/thirdparty/ice/ice_dcb_lib.h

@@ -31,7 +31,8 @@ void ice_update_dcb_stats(struct ice_pf *pf);
 void
 ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
                               struct ice_tx_buf *first);
-void ice_setup_dcb_qos_info(struct ice_pf *pf, struct ice_qos_params *qos_info);
+void
+ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_qos_params *qos_info);
 void
 ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
                                     struct ice_rq_event_info *event);
@@ -124,16 +125,19 @@ ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
 static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
 {
-    return -EOPNOTSUPP;
+    return 0;
 }
 static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
 static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
 static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
-static inline void ice_setup_dcb_qos_info(struct ice_pf *pf, struct ice_qos_params *qos_info) { }
-static inline
-void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { }
+static inline void
+ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_qos_params *qos_info) { }
+static inline void
+ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
+                                    struct ice_rq_event_info *event) { }
+static inline void
+ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { }
 #endif /* CONFIG_DCB */
 #endif /* _ICE_DCB_LIB_H_ */

drivers/thirdparty/ice/ice_dcb_nl.c

@@ -66,6 +66,11 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
     int bwcfg = 0, bwrec = 0;
     int err, i;
+#ifdef HAVE_NETDEV_UPPER_INFO
+    if (pf->lag->bonded)
+        return -EINVAL;
+#endif /* HAVE_NETDEV_UPPER_INFO */
     if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
         !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
         return -EINVAL;
@@ -104,6 +109,12 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
     new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
+    /* Not all TCs can have a BW of zero, FW requires at least one TC
+     * with BW assigned, and sum of all has to be 100%. Set TC0 to 100%
+     */
+    if (!bwcfg)
+        new_cfg->etscfg.tcbwtable[0] = 100;
     if (!bwrec)
         new_cfg->etsrec.tcbwtable[0] = 100;
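The added check encodes the firmware's ETS rule: per-TC bandwidths must sum to exactly 100, and an all-zero request is repaired by granting TC0 the full 100%. A standalone sketch of that validation rule (the constants mirror ICE_TC_MAX_BW by value only):

    #include <stdio.h>

    #define MAX_TCS   8
    #define TC_MAX_BW 100

    /* returns 0 when the table satisfies the FW rule, -1 otherwise */
    static int validate_ets_bw(unsigned char bw[MAX_TCS])
    {
        int i, total = 0;

        for (i = 0; i < MAX_TCS; i++)
            total += bw[i];
        if (!total) {
            bw[0] = TC_MAX_BW; /* FW requires at least one TC with BW */
            return 0;
        }
        return total == TC_MAX_BW ? 0 : -1; /* driver reports -EINVAL */
    }

    int main(void)
    {
        unsigned char bw[MAX_TCS] = { 50, 50 };

        printf("valid: %s\n", validate_ets_bw(bw) == 0 ? "yes" : "no");
        return 0;
    }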
@@ -160,6 +171,11 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
     struct ice_pf *pf = ice_netdev_to_pf(netdev);
     struct ice_qos_cfg *qos_cfg;
+#ifdef HAVE_NETDEV_UPPER_INFO
+    if (pf->lag->bonded)
+        return ICE_DCB_NO_HW_CHG;
+#endif /* HAVE_NETDEV_UPPER_INFO */
     /* if FW LLDP agent is running, DCBNL not allowed to change mode */
     if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
         return ICE_DCB_NO_HW_CHG;
@@ -174,15 +190,17 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
     if (mode == pf->dcbx_cap)
         return ICE_DCB_NO_HW_CHG;
-    pf->dcbx_cap = mode;
     qos_cfg = &pf->hw.port_info->qos_cfg;
-    if (mode & DCB_CAP_DCBX_VER_CEE) {
-        if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
-            return ICE_DCB_NO_HW_CHG;
+    /* DSCP configuration is not DCBx negotiated */
+    if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
+        return ICE_DCB_NO_HW_CHG;
+    pf->dcbx_cap = mode;
+    if (mode & DCB_CAP_DCBX_VER_CEE)
         qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
-    } else {
+    else
         qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
-    }
     dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
     return ICE_DCB_HW_CHG_RST;
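The reordering above is a check-before-commit fix: the DSCP guard now runs before pf->dcbx_cap is written, so a rejected mode change no longer leaves the capability field half-updated. A minimal standalone model of the corrected ordering (state and return values simplified):

    #include <stdbool.h>
    #include <stdio.h>

    struct cfg { int mode; bool dscp_pfc; };

    static int set_mode(struct cfg *c, int new_mode)
    {
        /* guard first: DSCP-based PFC is not DCBx negotiated */
        if (c->dscp_pfc)
            return -1;          /* driver returns ICE_DCB_NO_HW_CHG */
        c->mode = new_mode;     /* commit only after all checks pass */
        return 0;
    }

    int main(void)
    {
        struct cfg c = { .mode = 1, .dscp_pfc = true };

        set_mode(&c, 2);
        printf("mode unchanged: %d\n", c.mode); /* still 1 */
        return 0;
    }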
@@ -258,6 +276,12 @@ static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
     struct ice_dcbx_cfg *new_cfg;
     int err;
+#ifdef HAVE_NETDEV_UPPER_INFO
+    if (pf->lag->bonded)
+        return -EINVAL;
+#endif /* HAVE_NETDEV_UPPER_INFO */
     if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
         !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
         return -EINVAL;
@@ -327,6 +351,12 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
     struct ice_pf *pf = ice_netdev_to_pf(netdev);
     struct ice_dcbx_cfg *new_cfg;
+#ifdef HAVE_NETDEV_UPPER_INFO
+    if (pf->lag->bonded)
+        return;
+#endif /* HAVE_NETDEV_UPPER_INFO */
     if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
         !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
         return;
@@ -386,6 +416,12 @@ static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
 {
     struct ice_pf *pf = ice_netdev_to_pf(netdev);
+#ifdef HAVE_NETDEV_UPPER_INFO
+    if (pf->lag->bonded)
+        return ICE_DCB_NO_HW_CHG;
+#endif /* HAVE_NETDEV_UPPER_INFO */
     if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
         !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
         return ICE_DCB_NO_HW_CHG;
@@ -455,6 +491,12 @@ ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
     struct ice_dcbx_cfg *new_cfg;
     int i;
+#ifdef HAVE_NETDEV_UPPER_INFO
+    if (pf->lag->bonded)
+        return;
+#endif /* HAVE_NETDEV_UPPER_INFO */
     if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
         !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
         return;
@@ -509,6 +551,12 @@ ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
     struct ice_pf *pf = ice_netdev_to_pf(netdev);
     struct ice_dcbx_cfg *new_cfg;
+#ifdef HAVE_NETDEV_UPPER_INFO
+    if (pf->lag->bonded)
+        return;
+#endif /* HAVE_NETDEV_UPPER_INFO */
     if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
         !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
         return;
@@ -718,6 +766,12 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
     u8 max_tc;
     int ret;
+#ifdef HAVE_NETDEV_UPPER_INFO
+    if (pf->lag->bonded)
+        return -EINVAL;
+#endif /* HAVE_NETDEV_UPPER_INFO */
     /* ONLY DSCP APP TLVs have operational significance */
     if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
         return -EINVAL;
@@ -754,7 +808,6 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
         return -EINVAL;
     }
-
     /* grab TC mutex */
     mutex_lock(&pf->tc_mutex);
@@ -854,6 +907,12 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
     unsigned int i, j;
     int ret = 0;
+#ifdef HAVE_NETDEV_UPPER_INFO
+    if (pf->lag->bonded)
+        return -EINVAL;
+#endif /* HAVE_NETDEV_UPPER_INFO */
     if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
         netdev_err(netdev, "can't delete DSCP netlink app when FW DCB agent is active\n");
         return -EINVAL;
@@ -964,6 +1023,12 @@ static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
     struct ice_dcbx_cfg *new_cfg;
     int err;
+#ifdef HAVE_NETDEV_UPPER_INFO
+    if (pf->lag->bonded)
+        return ICE_DCB_NO_HW_CHG;
+#endif /* HAVE_NETDEV_UPPER_INFO */
     if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
         !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
         return ICE_DCB_NO_HW_CHG;

drivers/thirdparty/ice/ice_dcb_nl.h

@ -13,8 +13,8 @@ ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
#else #else
static inline void ice_dcbnl_setup(struct ice_vsi *vsi) { } static inline void ice_dcbnl_setup(struct ice_vsi *vsi) { }
static inline void ice_dcbnl_set_all(struct ice_vsi *vsi) { } static inline void ice_dcbnl_set_all(struct ice_vsi *vsi) { }
static inline void ice_dcbnl_flush_apps(struct ice_pf *pf, static inline void
struct ice_dcbx_cfg *old_cfg, ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
struct ice_dcbx_cfg *new_cfg) { } struct ice_dcbx_cfg *new_cfg) { }
#endif /* CONFIG_DCB */ #endif /* CONFIG_DCB */
#endif /* _ICE_DCB_NL_H_ */ #endif /* _ICE_DCB_NL_H_ */

drivers/thirdparty/ice/ice_dcf.c

@@ -54,6 +54,9 @@ static const enum ice_adminq_opc aqc_permitted_tbl[] = {
     ice_aqc_opc_query_acl_entry,
     ice_aqc_opc_query_acl_actpair,
     ice_aqc_opc_query_acl_counter,
+
+    /* QoS */
+    ice_aqc_opc_query_port_ets,
 };
 /**
@@ -117,6 +120,58 @@ bool ice_dcf_is_udp_tunnel_aq_cmd(struct ice_aq_desc *desc, u8 *aq_buf)
     return false;
 }
+/**
+ * ice_is_vf_adq_enabled - Check if any VF has ADQ enabled
+ * @pf: pointer to the PF structure
+ * @vf_id: on true return, the first VF ID that we found had ADQ enabled
+ *
+ * Return true if any VF has ADQ enabled. Return false otherwise.
+ */
+static bool ice_is_vf_adq_enabled(struct ice_pf *pf, u16 *vf_id)
+{
+    bool adq_enabled = false;
+    struct ice_vf *vf;
+    unsigned int bkt;
+    rcu_read_lock();
+    ice_for_each_vf_rcu(pf, bkt, vf) {
+        if (vf->adq_enabled) {
+            *vf_id = vf->vf_id;
+            adq_enabled = true;
+            break;
+        }
+    }
+    rcu_read_unlock();
+    return adq_enabled;
+}
+/**
+ * ice_vf_chnl_fltrs_enabled - Check if a VF has TC filters enabled
+ * @pf: pointer to the PF structure
+ * @vf_id: on true return, the first VF ID that we found had TC filters
+ *
+ * Return true if any VF has TC filters. Return false otherwise.
+ */
+static bool ice_vf_chnl_fltrs_enabled(struct ice_pf *pf, u16 *vf_id)
+{
+    bool chnl_fltrs_enabled = false;
+    struct ice_vf *vf;
+    unsigned int bkt;
+    rcu_read_lock();
+    ice_for_each_vf_rcu(pf, bkt, vf) {
+        if (vf->num_dmac_chnl_fltrs) {
+            *vf_id = vf->vf_id;
+            chnl_fltrs_enabled = true;
+            break;
+        }
+    }
+    rcu_read_unlock();
+    return chnl_fltrs_enabled;
+}
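Both helpers above walk the VF table under rcu_read_lock() via the driver's ice_for_each_vf_rcu() iterator and stop at the first match. A standalone approximation of that scan-and-report shape (the RCU calls are stubbed; the real iterator walks a hash table that can change concurrently):

    #include <stdbool.h>
    #include <stdio.h>

    struct vf { unsigned short vf_id; bool adq_enabled; };

    /* stand-ins for rcu_read_lock()/rcu_read_unlock() */
    static void rcu_read_lock_stub(void)   { }
    static void rcu_read_unlock_stub(void) { }

    static bool any_vf_adq_enabled(struct vf *vfs, int n,
                                   unsigned short *vf_id)
    {
        bool found = false;
        int i;

        rcu_read_lock_stub();
        for (i = 0; i < n; i++) {
            if (vfs[i].adq_enabled) {
                *vf_id = vfs[i].vf_id;
                found = true;
                break; /* stop at the first match, like the driver */
            }
        }
        rcu_read_unlock_stub();
        return found;
    }

    int main(void)
    {
        struct vf vfs[2] = { { 0, false }, { 1, true } };
        unsigned short id;

        if (any_vf_adq_enabled(vfs, 2, &id))
            printf("VF %u has ADQ enabled\n", id);
        return 0;
    }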
 /**
  * ice_check_dcf_allowed - check if DCF is allowed based on various checks
  * @vf: pointer to the VF to check
@@ -130,25 +185,15 @@ bool ice_check_dcf_allowed(struct ice_vf *vf)
     dev = ice_pf_to_dev(pf);
-    if (vf->vf_id != ICE_DCF_VFID0 && vf->vf_id != ICE_DCF_VFID1) {
-        dev_err(dev, "VF %d requested DCF capability, but only VF %d and %d are allowed to request DCF capability\n",
-                vf->vf_id, ICE_DCF_VFID0, ICE_DCF_VFID1);
+    if (vf->vf_id != ICE_DCF_VFID) {
+        dev_err(dev, "VF %d requested DCF capability, but only VF %d is allowed to request DCF capability\n",
+                vf->vf_id, ICE_DCF_VFID);
         return false;
     }
     if (!vf->trusted) {
-#ifdef HAVE_NDO_SET_VF_TRUST
         dev_err(dev, "VF needs to be trusted to configure DCF capability\n");
         return false;
-#else
-        int ret;
-        ret = ice_set_vf_trust(ice_get_main_vsi(pf)->netdev, vf->vf_id, true);
-        if (ret) {
-            dev_err(dev, "Failed to set trusted VF to configure DCF capability.\n");
-            return false;
-        }
-#endif /* HAVE_NDO_SET_VF_TRUST */
     }
     /* DCF and ADQ are mutually exclusive. */
@@ -158,12 +203,11 @@ bool ice_check_dcf_allowed(struct ice_vf *vf)
         return false;
     }
 #endif /* NETIF_F_HW_TC */
-    ice_for_each_vf(pf, i) {
-        if (pf->vf[i].adq_enabled) {
-            dev_err(dev, "ADQ on VF %d is currently enabled. Device Control Functionality cannot be enabled.\n",
-                    pf->vf[i].vf_id);
-            return false;
-        }
+    if (ice_is_vf_adq_enabled(pf, &i)) {
+        dev_err(dev, "ADQ on VF %d is currently enabled. Device Control Functionality cannot be enabled.\n",
+                i);
+        return false;
     }
 #ifdef HAVE_TC_SETUP_CLSFLOWER
@@ -172,20 +216,19 @@ bool ice_check_dcf_allowed(struct ice_vf *vf)
         return false;
     }
 #endif /* HAVE_TC_SETUP_CLSFLOWER */
-    ice_for_each_vf(pf, i) {
-        if (pf->vf[i].num_dmac_chnl_fltrs) {
-            dev_err(dev, "TC filters on VF %d are currently in use. Device Control Functionality cannot be enabled.\n",
-                    pf->vf[i].vf_id);
-            return false;
-        }
+    if (ice_vf_chnl_fltrs_enabled(pf, &i)) {
+        dev_err(dev, "TC filters on VF %d are currently in use. Device Control Functionality cannot be enabled.\n",
+                i);
+        return false;
     }
-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
     if (ice_is_offloaded_macvlan_ena(pf)) {
         dev_err(dev, "L2 Forwarding Offload is currently enabled. Device Control Functionality cannot be enabled.\n");
         return false;
     }
-#endif /* HAVE_NETDEV_SB_DEV */
+#endif /* HAVE_NDO_DFWD_OPS */
     sw = pf->hw.switch_info;
     for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
@@ -268,7 +311,7 @@ ice_dcf_rm_sw_rule_to_vsi(struct ice_pf *pf,
                           struct ice_dcf_sw_rule_entry *s_entry)
 {
     struct ice_aqc_sw_rules_elem *s_rule;
-    enum ice_status status;
+    int status;
     s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
     if (!s_rule)
@@ -301,9 +344,9 @@ ice_dcf_rm_sw_rule_to_vsi_list(struct ice_pf *pf,
     struct ice_dcf_vsi_list_info *vsi_list_info = s_entry->vsi_list_info;
     struct ice_aqc_alloc_free_res_elem *res_buf;
     struct ice_aqc_sw_rules_elem *s_rule;
-    enum ice_status status;
     u16 rule_sz;
     u16 vsi_id;
+    int status;
     int i = 0;
     if (!vsi_list_info)
@@ -367,7 +410,7 @@ ice_dcf_rm_vsi_from_list(struct ice_pf *pf,
                          u16 hw_vsi_id)
 {
     struct ice_aqc_sw_rules_elem *s_rule;
-    enum ice_status status;
+    int status;
     if (!vsi_list_info || !vsi_list_info->vsi_count ||
         !test_bit(hw_vsi_id, vsi_list_info->hw_vsi_map))
@@ -415,15 +458,15 @@ void ice_rm_dcf_sw_vsi_rule(struct ice_pf *pf, u16 hw_vsi_id)
                                        s_entry->vsi_list_info,
                                        hw_vsi_id);
         if (ret && ret != -ENOENT)
-            dev_err(ice_pf_to_dev(pf),
-                    "Failed to remove VSI %u from VSI list : %d\n",
-                    hw_vsi_id, ret);
+            ice_dev_err_errno(ice_pf_to_dev(pf), ret,
                              "Failed to remove VSI %u from VSI list",
                              hw_vsi_id);
} else if (s_entry->fwd_id.hw_vsi_id == hw_vsi_id) { } else if (s_entry->fwd_id.hw_vsi_id == hw_vsi_id) {
ret = ice_dcf_rm_sw_rule_to_vsi(pf, s_entry); ret = ice_dcf_rm_sw_rule_to_vsi(pf, s_entry);
if (ret) if (ret)
dev_err(ice_pf_to_dev(pf), ice_dev_err_errno(ice_pf_to_dev(pf), ret,
"Failed to remove VSI %u switch rule : %d\n", "Failed to remove VSI %u switch rule",
hw_vsi_id, ret); hw_vsi_id);
} }
} }
@ -455,16 +498,16 @@ void ice_rm_all_dcf_sw_rules(struct ice_pf *pf)
rule_id = sw_rule->rule_id; rule_id = sw_rule->rule_id;
ret = ice_dcf_rm_sw_rule_to_vsi_list(pf, sw_rule); ret = ice_dcf_rm_sw_rule_to_vsi_list(pf, sw_rule);
if (ret) if (ret)
dev_err(ice_pf_to_dev(pf), ice_dev_err_errno(ice_pf_to_dev(pf), ret,
"Failed to remove switch rule 0x%04x with list id %u : %d\n", "Failed to remove switch rule 0x%04x with list id %u",
rule_id, list_id, ret); rule_id, list_id);
} else { } else {
rule_id = sw_rule->rule_id; rule_id = sw_rule->rule_id;
ret = ice_dcf_rm_sw_rule_to_vsi(pf, sw_rule); ret = ice_dcf_rm_sw_rule_to_vsi(pf, sw_rule);
if (ret) if (ret)
dev_err(ice_pf_to_dev(pf), ice_dev_err_errno(ice_pf_to_dev(pf), ret,
"Failed to remove switch rule 0x%04x : %d\n", "Failed to remove switch rule 0x%04x",
rule_id, ret); rule_id);
} }
/* clears rule filter management data if AdminQ command has error */ /* clears rule filter management data if AdminQ command has error */


@ -6,9 +6,9 @@
struct ice_vf; struct ice_vf;
struct ice_pf; struct ice_pf;
struct ice_hw;
#define ICE_DCF_VFID0 0 #define ICE_DCF_VFID 0
#define ICE_DCF_VFID1 1
/* DCF mode states */ /* DCF mode states */
enum ice_dcf_state { enum ice_dcf_state {

drivers/thirdparty/ice/ice_ddp.c vendored Normal file

File diff suppressed because it is too large

drivers/thirdparty/ice/ice_ddp.h vendored Normal file

@ -0,0 +1,466 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_DDP_H_
#define _ICE_DDP_H_
#include "ice_osdep.h"
#include "ice_adminq_cmd.h"
#include "ice_controlq.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
/* Minimum package version supported */
#define ICE_PKG_SUPP_VER_MAJ 1
#define ICE_PKG_SUPP_VER_MNR 3
/* Package format version */
#define ICE_PKG_FMT_VER_MAJ 1
#define ICE_PKG_FMT_VER_MNR 0
#define ICE_PKG_FMT_VER_UPD 0
#define ICE_PKG_FMT_VER_DFT 0
#define ICE_PKG_CNT 4
enum ice_ddp_state {
/* Indicates that this call to ice_init_pkg
* successfully loaded the requested DDP package
*/
ICE_DDP_PKG_SUCCESS = 0,
/* Generic error for already loaded errors, it is mapped later to
* the more specific one (one of the next 3)
*/
ICE_DDP_PKG_ALREADY_LOADED = -1,
/* Indicates that a DDP package of the same version has already been
* loaded onto the device by a previous call or by another PF
*/
ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
/* The device has a DDP package that is not supported by the driver */
ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
/* The device has a compatible package
* (but different from the request) already loaded
*/
ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
/* The firmware loaded on the device is not compatible with
* the DDP package loaded
*/
ICE_DDP_PKG_FW_MISMATCH = -5,
/* The DDP package file is invalid */
ICE_DDP_PKG_INVALID_FILE = -6,
/* The version of the DDP package provided is higher than
* the driver supports
*/
ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
/* The version of the DDP package provided is lower than the
* driver supports
*/
ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
/* Missing security manifest in DDP pkg */
ICE_DDP_PKG_NO_SEC_MANIFEST = -9,
/* The RSA signature of the DDP package file provided is invalid */
ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -10,
/* The DDP package file security revision is too low and not
* supported by firmware
*/
ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW = -11,
/* Manifest hash mismatch */
ICE_DDP_PKG_MANIFEST_INVALID = -12,
/* Buffer hash mismatches manifest */
ICE_DDP_PKG_BUFFER_INVALID = -13,
/* Other errors */
ICE_DDP_PKG_ERR = -14,
};
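A few of the negative states above still leave the device with a usable package. A hedged sketch of the kind of check that ice_is_init_pkg_successful() (declared near the end of this header) is expected to perform; the helper name here is illustrative only:

/* Sketch: load outcomes assumed to leave a usable package behind */
static inline bool ice_ddp_state_usable(enum ice_ddp_state state)
{
	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		return true;
	default:
		return false;
	}
}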
/* Package and segment headers and tables */
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
__le32 seg_offset[];
};
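Each seg_offset entry is a byte offset from the start of the package header, so segment lookup is a linear scan over seg_count entries. A minimal sketch, assuming pkg has already passed ice_verify_pkg(); this stand-alone walker is illustrative, the driver's real lookup being ice_find_seg_in_pkg(), declared below:

static struct ice_generic_seg_hdr *
pkg_find_seg(struct ice_pkg_hdr *pkg, u32 seg_type)
{
	u32 i;

	for (i = 0; i < le32_to_cpu(pkg->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		/* seg_offset[i] is relative to the package header itself */
		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg + le32_to_cpu(pkg->seg_offset[i]));
		if (le32_to_cpu(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}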
/* Package signing algorithm types */
#define SEGMENT_SIGN_TYPE_INVALID 0x00000000
#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001
#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002
#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */
/* generic segment */
struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_INVALID 0x00000000
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE_E810 0x00000010
#define SEGMENT_TYPE_SIGNING 0x00001001
#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
char seg_id[ICE_PKG_NAME_SIZE];
};
/* ice specific segment */
union ice_device_id {
struct {
__le16 device_id;
__le16 vendor_id;
} dev_vend_id;
__le32 id;
};
struct ice_device_id_entry {
union ice_device_id device;
union ice_device_id sub_device;
};
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
struct ice_device_id_entry device_table[];
};
struct ice_nvm_table {
__le32 table_count;
__le32 vers[];
};
struct ice_buf {
#define ICE_PKG_BUF_SIZE 4096
u8 buf[ICE_PKG_BUF_SIZE];
};
struct ice_buf_table {
__le32 buf_count;
struct ice_buf buf_array[];
};
struct ice_run_time_cfg_seg {
struct ice_generic_seg_hdr hdr;
u8 rsvd[8];
struct ice_buf_table buf_table;
};
/* global metadata specific segment */
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
__le32 rsvd;
char pkg_name[ICE_PKG_NAME_SIZE];
};
#define ICE_MIN_S_OFF 12
#define ICE_MAX_S_OFF 4095
#define ICE_MIN_S_SZ 1
#define ICE_MAX_S_SZ 4084
struct ice_sign_seg {
struct ice_generic_seg_hdr hdr;
__le32 seg_id;
__le32 sign_type;
__le32 signed_seg_idx;
__le32 signed_buf_start;
__le32 signed_buf_count;
#define ICE_SIGN_SEG_RESERVED_COUNT 44
u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
struct ice_buf_table buf_tbl;
};
/* section information */
struct ice_section_entry {
__le32 type;
__le16 offset;
__le16 size;
};
#define ICE_MIN_S_COUNT 1
#define ICE_MAX_S_COUNT 511
#define ICE_MIN_S_DATA_END 12
#define ICE_MAX_S_DATA_END 4096
#define ICE_METADATA_BUF 0x80000000
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
struct ice_section_entry section_entry[];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
(ent_sz))
/* ice package section IDs */
#define ICE_SID_METADATA 1
#define ICE_SID_XLT0_SW 10
#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
#define ICE_SID_CDID_KEY_BUILDER_SW 17
#define ICE_SID_CDID_REDIR_SW 18
#define ICE_SID_XLT0_ACL 20
#define ICE_SID_XLT_KEY_BUILDER_ACL 21
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
#define ICE_SID_CDID_KEY_BUILDER_ACL 27
#define ICE_SID_CDID_REDIR_ACL 28
#define ICE_SID_XLT0_FD 30
#define ICE_SID_XLT_KEY_BUILDER_FD 31
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
#define ICE_SID_CDID_KEY_BUILDER_FD 37
#define ICE_SID_CDID_REDIR_FD 38
#define ICE_SID_XLT0_RSS 40
#define ICE_SID_XLT_KEY_BUILDER_RSS 41
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_CAM 50
#define ICE_SID_RXPARSER_NOMATCH_CAM 51
#define ICE_SID_RXPARSER_IMEM 52
#define ICE_SID_RXPARSER_XLT0_BUILDER 53
#define ICE_SID_RXPARSER_NODE_PTYPE 54
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_RXPARSER_PROTO_GRP 57
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_RXPARSER_XLT0 59
#define ICE_SID_TXPARSER_CAM 60
#define ICE_SID_TXPARSER_NOMATCH_CAM 61
#define ICE_SID_TXPARSER_IMEM 62
#define ICE_SID_TXPARSER_XLT0_BUILDER 63
#define ICE_SID_TXPARSER_NODE_PTYPE 64
#define ICE_SID_TXPARSER_MARKER_PTYPE 65
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_TXPARSER_PROTO_GRP 67
#define ICE_SID_TXPARSER_METADATA_INIT 68
#define ICE_SID_TXPARSER_XLT0 69
#define ICE_SID_RXPARSER_INIT_REDIR 70
#define ICE_SID_TXPARSER_INIT_REDIR 71
#define ICE_SID_RXPARSER_MARKER_GRP 72
#define ICE_SID_TXPARSER_MARKER_GRP 73
#define ICE_SID_RXPARSER_LAST_PROTO 74
#define ICE_SID_TXPARSER_LAST_PROTO 75
#define ICE_SID_RXPARSER_PG_SPILL 76
#define ICE_SID_TXPARSER_PG_SPILL 77
#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
#define ICE_SID_RXPARSER_FLAG_REDIR 97
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
#define ICE_SID_LBL_RESERVED_12 0x80000012
#define ICE_SID_LBL_RESERVED_13 0x80000013
#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
#define ICE_SID_LBL_PTYPE 0x80000016
#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
#define ICE_SID_LBL_FLAG 0x80000023
#define ICE_SID_LBL_REG 0x80000024
#define ICE_SID_LBL_SW_PTG 0x80000025
#define ICE_SID_LBL_ACL_PTG 0x80000026
#define ICE_SID_LBL_PE_PTG 0x80000027
#define ICE_SID_LBL_RSS_PTG 0x80000028
#define ICE_SID_LBL_FD_PTG 0x80000029
#define ICE_SID_LBL_SW_VSIG 0x8000002A
#define ICE_SID_LBL_ACL_VSIG 0x8000002B
#define ICE_SID_LBL_PE_VSIG 0x8000002C
#define ICE_SID_LBL_RSS_VSIG 0x8000002D
#define ICE_SID_LBL_FD_VSIG 0x8000002E
#define ICE_SID_LBL_PTYPE_META 0x8000002F
#define ICE_SID_LBL_SW_PROFID 0x80000030
#define ICE_SID_LBL_ACL_PROFID 0x80000031
#define ICE_SID_LBL_PE_PROFID 0x80000032
#define ICE_SID_LBL_RSS_PROFID 0x80000033
#define ICE_SID_LBL_FD_PROFID 0x80000034
#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
/* The following define MUST be updated to reflect the last label section ID */
#define ICE_SID_LBL_LAST 0x80000038
/* Label ICE runtime configuration section IDs */
#define ICE_SID_TX_5_LAYER_TOPO 0x10
enum ice_block {
ICE_BLK_SW = 0,
ICE_BLK_ACL,
ICE_BLK_FD,
ICE_BLK_RSS,
ICE_BLK_PE,
ICE_BLK_COUNT
};
enum ice_sect {
ICE_XLT0 = 0,
ICE_XLT_KB,
ICE_XLT1,
ICE_XLT2,
ICE_PROF_TCAM,
ICE_PROF_REDIR,
ICE_VEC_TBL,
ICE_CDID_KB,
ICE_CDID_REDIR,
ICE_SECT_COUNT
};
/* package buffer building */
struct ice_buf_build {
struct ice_buf buf;
u16 reserved_section_table_entries;
};
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
u32 buf_idx;
u32 type;
struct ice_buf_hdr *buf;
u32 sect_idx;
void *sect;
u32 sect_type;
u32 entry_idx;
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
/* package Marker PType TCAM entry */
struct ice_marker_ptype_tcam_entry {
#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
__le16 addr;
__le16 ptype;
u8 keys[20];
};
struct ice_marker_ptype_tcam_section {
__le16 count;
__le16 reserved;
struct ice_marker_ptype_tcam_entry tcam[];
};
#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \
sizeof(struct ice_marker_ptype_tcam_entry), \
sizeof(struct ice_marker_ptype_tcam_entry))
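Worked example of the capacity math, assuming the layouts above have no padding (they are naturally aligned): one entry is 2 + 2 + 20 = 24 bytes, the section header cost passed as hd_sz is struct_size(section, tcam, 1) - sizeof(entry) = 28 - 24 = 4 bytes, and an ice_buf_hdr carrying one 8-byte section entry is 12 bytes, so ICE_MAX_ENTRIES_IN_BUF(4, 24) = (4096 - 12 - 4) / 24 = 170 marker PType TCAM entries per 4 KB package buffer.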
struct ice_hw;
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
int
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
int
ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list);
int
ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
int
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
int
ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
void ice_release_global_cfg_lock(struct ice_hw *hw);
struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
struct ice_pkg_hdr *pkg_hdr);
enum ice_ddp_state
ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len);
enum ice_ddp_state
ice_get_pkg_info(struct ice_hw *hw);
void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg);
struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg);
int
ice_acquire_global_cfg_lock(struct ice_hw *hw,
enum ice_aq_res_access_type access);
struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state);
bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state);
void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type, u32 *offset,
void *(*handler)(u32 sect_type, void *section,
u32 index, u32 *offset));
void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_ddp_state
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
bool ice_is_init_pkg_successful(enum ice_ddp_state state);
void ice_free_seg(struct ice_hw *hw);
struct ice_buf_build *
ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
void **section);
struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
#endif /* _ICE_DDP_H_ */


@ -8,10 +8,8 @@
#include "ice_lib.h" #include "ice_lib.h"
#include "ice_fltr.h" #include "ice_fltr.h"
static struct dentry *ice_debugfs_root; static struct dentry *ice_debugfs_root;
static void ice_dump_pf(struct ice_pf *pf) static void ice_dump_pf(struct ice_pf *pf)
{ {
struct device *dev = ice_pf_to_dev(pf); struct device *dev = ice_pf_to_dev(pf);
@ -27,10 +25,10 @@ static void ice_dump_pf(struct ice_pf *pf)
dev_info(dev, "\tnum_lan_msix = %d\n", pf->num_lan_msix); dev_info(dev, "\tnum_lan_msix = %d\n", pf->num_lan_msix);
dev_info(dev, "\tnum_rdma_msix = %d\n", pf->num_rdma_msix); dev_info(dev, "\tnum_rdma_msix = %d\n", pf->num_rdma_msix);
dev_info(dev, "\trdma_base_vector = %d\n", pf->rdma_base_vector); dev_info(dev, "\trdma_base_vector = %d\n", pf->rdma_base_vector);
#ifdef HAVE_NETDEV_SB_DEV #ifdef HAVE_NDO_DFWD_OPS
dev_info(dev, "\tnum_macvlan = %d\n", pf->num_macvlan); dev_info(dev, "\tnum_macvlan = %d\n", pf->num_macvlan);
dev_info(dev, "\tmax_num_macvlan = %d\n", pf->max_num_macvlan); dev_info(dev, "\tmax_num_macvlan = %d\n", pf->max_num_macvlan);
#endif /* HAVE_NETDEV_SB_DEV */ #endif /* HAVE_NDO_DFWD_OPS */
dev_info(dev, "\tirq_tracker->num_entries = %d\n", dev_info(dev, "\tirq_tracker->num_entries = %d\n",
pf->irq_tracker->num_entries); pf->irq_tracker->num_entries);
dev_info(dev, "\tirq_tracker->end = %d\n", pf->irq_tracker->end); dev_info(dev, "\tirq_tracker->end = %d\n", pf->irq_tracker->end);
@ -38,9 +36,9 @@ static void ice_dump_pf(struct ice_pf *pf)
ice_get_valid_res_count(pf->irq_tracker)); ice_get_valid_res_count(pf->irq_tracker));
dev_info(dev, "\tnum_avail_sw_msix = %d\n", pf->num_avail_sw_msix); dev_info(dev, "\tnum_avail_sw_msix = %d\n", pf->num_avail_sw_msix);
dev_info(dev, "\tsriov_base_vector = %d\n", pf->sriov_base_vector); dev_info(dev, "\tsriov_base_vector = %d\n", pf->sriov_base_vector);
dev_info(dev, "\tnum_alloc_vfs = %d\n", pf->num_alloc_vfs); dev_info(dev, "\tnum_alloc_vfs = %d\n", ice_get_num_vfs(pf));
dev_info(dev, "\tnum_qps_per_vf = %d\n", pf->num_qps_per_vf); dev_info(dev, "\tnum_qps_per_vf = %d\n", pf->vfs.num_qps_per);
dev_info(dev, "\tnum_msix_per_vf = %d\n", pf->num_msix_per_vf); dev_info(dev, "\tnum_msix_per_vf = %d\n", pf->vfs.num_msix_per);
} }
static void ice_dump_pf_vsi_list(struct ice_pf *pf) static void ice_dump_pf_vsi_list(struct ice_pf *pf)
@ -55,13 +53,13 @@ static void ice_dump_pf_vsi_list(struct ice_pf *pf)
continue; continue;
dev_info(dev, "vsi[%d]:\n", i); dev_info(dev, "vsi[%d]:\n", i);
dev_info(dev, "\tvsi = %pK\n", vsi); dev_info(dev, "\tvsi = %p\n", vsi);
dev_info(dev, "\tvsi_num = %d\n", vsi->vsi_num); dev_info(dev, "\tvsi_num = %d\n", vsi->vsi_num);
dev_info(dev, "\ttype = %s\n", ice_vsi_type_str(vsi->type)); dev_info(dev, "\ttype = %s\n", ice_vsi_type_str(vsi->type));
if (vsi->type == ICE_VSI_VF) if (vsi->type == ICE_VSI_VF)
dev_info(dev, "\tvf_id = %d\n", vsi->vf_id); dev_info(dev, "\tvf_id = %d\n", vsi->vf->vf_id);
dev_info(dev, "\tback = %pK\n", vsi->back); dev_info(dev, "\tback = %p\n", vsi->back);
dev_info(dev, "\tnetdev = %pK\n", vsi->netdev); dev_info(dev, "\tnetdev = %p\n", vsi->netdev);
dev_info(dev, "\tmax_frame = %d\n", vsi->max_frame); dev_info(dev, "\tmax_frame = %d\n", vsi->max_frame);
dev_info(dev, "\trx_buf_len = %d\n", vsi->rx_buf_len); dev_info(dev, "\trx_buf_len = %d\n", vsi->rx_buf_len);
dev_info(dev, "\tnum_txq = %d\n", vsi->num_txq); dev_info(dev, "\tnum_txq = %d\n", vsi->num_txq);
@ -124,6 +122,61 @@ static void ice_dump_pf_fdir(struct ice_pf *pf)
hw->func_caps.fd_fltr_best_effort); hw->func_caps.fd_fltr_best_effort);
} }
/**
* ice_dump_rclk_status - print the PHY recovered clock status
* @pf: pointer to PF
*
* Print the PHY's recovered clock pin status.
*/
static void ice_dump_rclk_status(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
u8 phy, phy_pin, pin;
int phy_pins;
if (ice_is_e810(&pf->hw))
phy_pins = ICE_C827_RCLK_PINS_NUM;
else
/* E822-based devices have only one RCLK pin */
phy_pins = E822_CGU_RCLK_PHY_PINS_NUM;
for (phy_pin = 0; phy_pin < phy_pins; phy_pin++) {
const char *pin_name, *pin_state;
u8 port_num, flags;
u32 freq;
port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
if (ice_aq_get_phy_rec_clk_out(&pf->hw, phy_pin, &port_num,
&flags, &freq))
return;
if (ice_is_e810(&pf->hw)) {
int status = ice_get_pf_c827_idx(&pf->hw, &phy);
if (status) {
dev_err(dev,
"Could not find PF C827 PHY, status=%d\n",
status);
return;
}
pin = E810T_CGU_INPUT_C827(phy, phy_pin);
pin_name = ice_zl_pin_idx_to_name_e810t(pin);
} else {
/* e822-based devices for now have only one phy
* available (from Rimmon) and only one DPLL RCLK input
* pin
*/
pin_name = E822_CGU_RCLK_PIN_NAME;
}
pin_state =
flags & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN ?
"Enabled" : "Disabled";
dev_info(dev, "State for pin %s: %s\n", pin_name, pin_state);
}
}
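As the debugfs command handler later in this file shows, this dump is reached by writing "dump rclk_status" to the PF's command file, and only when the ICE_F_PHY_RCLK feature is supported.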
/** /**
* ice_vsi_dump_ctxt - print the passed in VSI context structure * ice_vsi_dump_ctxt - print the passed in VSI context structure
* @dev: Device used for dev_info prints * @dev: Device used for dev_info prints
@ -216,6 +269,213 @@ static void ice_vsi_dump_ctxt(struct device *dev, struct ice_vsi_ctx *ctxt)
"enabled" : "disabled"); "enabled" : "disabled");
} }
#define ICE_E810T_NEVER_USE_PIN 0xff
#define ZL_VER_MAJOR_SHIFT 24
#define ZL_VER_MAJOR_MASK ICE_M(0xff, ZL_VER_MAJOR_SHIFT)
#define ZL_VER_MINOR_SHIFT 16
#define ZL_VER_MINOR_MASK ICE_M(0xff, ZL_VER_MINOR_SHIFT)
#define ZL_VER_REV_SHIFT 8
#define ZL_VER_REV_MASK ICE_M(0xff, ZL_VER_REV_SHIFT)
#define ZL_VER_BF_SHIFT 0
#define ZL_VER_BF_MASK ICE_M(0xff, ZL_VER_BF_SHIFT)
/**
* ice_get_dpll_status - get the detailed state of the clock generator
* @pf: pointer to PF
* @buff: buffer for the state to be printed
* @buff_size: size of the buffer
*
* This function reads the current status of the ZL CGU and prints it to the
* buffer. buff_size will be updated to reflect the number of bytes written to
* the buffer.
*
* Return: 0 on success, error code otherwise
*/
static int
ice_get_dpll_status(struct ice_pf *pf, char *buff, size_t *buff_size)
{
u8 pin, synce_prio, ptp_prio, ver_major, ver_minor, rev, bugfix;
struct ice_aqc_get_cgu_abilities abilities = {0};
struct ice_aqc_get_cgu_input_config cfg = {0};
struct device *dev = ice_pf_to_dev(pf);
u32 cgu_id, cgu_cfg_ver, cgu_fw_ver;
size_t bytes_left = *buff_size;
struct ice_hw *hw = &pf->hw;
char pin_name[MAX_PIN_NAME];
int cnt = 0;
int status;
if (!ice_is_cgu_present(hw)) {
dev_err(dev, "CGU not present\n");
return -ENODEV;
}
memset(&abilities, 0, sizeof(struct ice_aqc_get_cgu_abilities));
status = ice_aq_get_cgu_abilities(hw, &abilities);
if (status) {
dev_err(dev,
"Failed to read CGU caps, status: %d, Error: 0x%02X\n",
status, hw->adminq.sq_last_status);
abilities.num_inputs = 7;
abilities.pps_dpll_idx = 1;
abilities.synce_dpll_idx = 0;
}
status = ice_aq_get_cgu_info(hw, &cgu_id, &cgu_cfg_ver, &cgu_fw_ver);
if (status)
return status;
if (abilities.cgu_part_num ==
ICE_ACQ_GET_LINK_TOPO_NODE_NR_ZL30632_80032) {
cnt = snprintf(buff, bytes_left, "Found ZL80032 CGU\n");
/* Read DPLL config version from AQ */
ver_major = (cgu_cfg_ver & ZL_VER_MAJOR_MASK)
>> ZL_VER_MAJOR_SHIFT;
ver_minor = (cgu_cfg_ver & ZL_VER_MINOR_MASK)
>> ZL_VER_MINOR_SHIFT;
rev = (cgu_cfg_ver & ZL_VER_REV_MASK) >> ZL_VER_REV_SHIFT;
bugfix = (cgu_cfg_ver & ZL_VER_BF_MASK) >> ZL_VER_BF_SHIFT;
cnt += snprintf(&buff[cnt], bytes_left - cnt,
"DPLL Config ver: %d.%d.%d.%d\n", ver_major,
ver_minor, rev, bugfix);
} else if (abilities.cgu_part_num ==
ICE_ACQ_GET_LINK_TOPO_NODE_NR_SI5383_5384) {
cnt = snprintf(buff, bytes_left, "Found SI5383/5384 CGU\n");
}
cnt += snprintf(&buff[cnt], bytes_left - cnt, "\nCGU Input status:\n");
cnt += snprintf(&buff[cnt], bytes_left - cnt,
" | | priority | |\n"
" input (idx) | state | EEC (%d) | PPS (%d) | ESync fail |\n",
abilities.synce_dpll_idx, abilities.pps_dpll_idx);
cnt += snprintf(&buff[cnt], bytes_left - cnt,
" ----------------------------------------------------------------\n");
for (pin = 0; pin < abilities.num_inputs; pin++) {
u8 esync_fail = 0;
u8 esync_en = 0;
char *pin_state;
u8 data;
status = ice_aq_get_input_pin_cfg(hw, &cfg, pin);
if (status)
data = ICE_CGU_IN_PIN_FAIL_FLAGS;
else
data = (cfg.status & ICE_CGU_IN_PIN_FAIL_FLAGS);
/* get either e810t pin names or generic ones */
ice_dpll_pin_idx_to_name(pf, pin, pin_name);
/* get pin priorities */
if (ice_aq_get_cgu_ref_prio(hw, abilities.synce_dpll_idx, pin,
&synce_prio))
synce_prio = ICE_E810T_NEVER_USE_PIN;
if (ice_aq_get_cgu_ref_prio(hw, abilities.pps_dpll_idx, pin,
&ptp_prio))
ptp_prio = ICE_E810T_NEVER_USE_PIN;
/* if all flags are set, the pin is invalid */
if (data == ICE_CGU_IN_PIN_FAIL_FLAGS) {
pin_state = ICE_DPLL_PIN_STATE_INVALID;
/* if some flags are set, the pin is validating */
} else if (data) {
pin_state = ICE_DPLL_PIN_STATE_VALIDATING;
/* if all flags are cleared, the pin is valid */
} else {
pin_state = ICE_DPLL_PIN_STATE_VALID;
esync_en = !!(cfg.flags2 &
ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN);
esync_fail = !!(cfg.status &
ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL);
}
cnt += snprintf(&buff[cnt], bytes_left - cnt,
" %12s (%d) | %10s | %3d | %3d | %4s |\n",
pin_name, pin, pin_state, synce_prio, ptp_prio,
esync_en ? esync_fail ?
"true" : "false" : "N/A");
}
if (!test_bit(ICE_FLAG_DPLL_MONITOR, pf->flags)) {
cnt += snprintf(&buff[cnt], bytes_left - cnt,
"\nDPLL Monitoring disabled\n");
} else {
/* SYNCE DPLL status */
ice_dpll_pin_idx_to_name(pf, pf->synce_ref_pin, pin_name);
cnt += snprintf(&buff[cnt], bytes_left - cnt, "\nEEC DPLL:\n");
cnt += snprintf(&buff[cnt], bytes_left - cnt,
"\tCurrent reference:\t%s\n", pin_name);
cnt += snprintf(&buff[cnt], bytes_left - cnt,
"\tStatus:\t\t\t%s\n",
ice_cgu_state_to_name(pf->synce_dpll_state));
ice_dpll_pin_idx_to_name(pf, pf->ptp_ref_pin, pin_name);
cnt += snprintf(&buff[cnt], bytes_left - cnt, "\nPPS DPLL:\n");
cnt += snprintf(&buff[cnt], bytes_left - cnt,
"\tCurrent reference:\t%s\n", pin_name);
cnt += snprintf(&buff[cnt], bytes_left - cnt,
"\tStatus:\t\t\t%s\n",
ice_cgu_state_to_name(pf->ptp_dpll_state));
if (pf->ptp_dpll_state != ICE_CGU_STATE_INVALID)
cnt += snprintf(&buff[cnt], bytes_left - cnt,
"\tPhase offset [ns]:\t\t\t%lld\n",
pf->ptp_dpll_phase_offset);
}
*buff_size = cnt;
return 0;
}
/**
* ice_debugfs_cgu_read - debugfs interface for reading DPLL status
* @filp: the opened file
* @user_buf: the user buffer the status text is copied into
* @count: size of the user buffer
* @ppos: file position offset
*
* Return: number of bytes read
*/
static ssize_t ice_debugfs_cgu_read(struct file *filp, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ice_pf *pf = filp->private_data;
size_t buffer_size = PAGE_SIZE;
char *kbuff;
int err;
if (*ppos != 0)
return 0;
kbuff = (char *)get_zeroed_page(GFP_KERNEL);
if (!kbuff)
return -ENOMEM;
err = ice_get_dpll_status(pf, kbuff, &buffer_size);
if (err) {
err = -EIO;
goto err;
}
err = simple_read_from_buffer(user_buf, count, ppos, kbuff,
buffer_size);
err:
free_page((unsigned long)kbuff);
return err;
}
static const struct file_operations ice_debugfs_cgu_fops = {
.owner = THIS_MODULE,
.llseek = default_llseek,
.open = simple_open,
.read = ice_debugfs_cgu_read,
};
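Once the "cgu" file is created against these fops (see the debugfs_create_file() call in ice_debugfs_pf_init() below), the whole status table can be fetched with a single read of that file under the PF's ice debugfs directory; the exact path depends on where ice_debugfs_root was created, typically beneath /sys/kernel/debug.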
static const char *module_id_to_name(u16 module_id) static const char *module_id_to_name(u16 module_id)
{ {
switch (module_id) { switch (module_id) {
@ -314,7 +574,7 @@ static void ice_fwlog_dump_cfg(struct ice_hw *hw)
{ {
struct device *dev = ice_pf_to_dev((struct ice_pf *)(hw->back)); struct device *dev = ice_pf_to_dev((struct ice_pf *)(hw->back));
struct ice_fwlog_cfg *cfg; struct ice_fwlog_cfg *cfg;
enum ice_status status; int status;
u16 i; u16 i;
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
@ -409,7 +669,6 @@ ice_debugfs_command_write(struct file *filp, const char __user *buf,
} }
ret = ice_aq_get_vsi_params(hw, vsi_ctx, NULL); ret = ice_aq_get_vsi_params(hw, vsi_ctx, NULL);
if (ret) { if (ret) {
ret = -EINVAL;
devm_kfree(dev, vsi_ctx); devm_kfree(dev, vsi_ctx);
goto command_help; goto command_help;
} }
@ -496,6 +755,9 @@ ice_debugfs_command_write(struct file *filp, const char __user *buf,
pf->globr_count); pf->globr_count);
dev_info(dev, "emp reset count: %d\n", pf->empr_count); dev_info(dev, "emp reset count: %d\n", pf->empr_count);
dev_info(dev, "pf reset count: %d\n", pf->pfr_count); dev_info(dev, "pf reset count: %d\n", pf->pfr_count);
} else if ((!strncmp(argv[1], "rclk_status", 11))) {
if (ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
ice_dump_rclk_status(pf);
} }
#ifdef CONFIG_DCB #ifdef CONFIG_DCB
@ -562,12 +824,8 @@ ice_debugfs_command_write(struct file *filp, const char __user *buf,
if (ret) if (ret)
goto command_help; goto command_help;
ice_cgu_cfg_ts_pll(pf, false, (enum ice_time_ref_freq)time_ref_freq, ice_cfg_cgu_pll_e822(hw, time_ref_freq, time_ref_sel);
(enum ice_cgu_time_ref_sel)time_ref_sel, ice_ptp_update_incval(pf, time_ref_freq, src_tmr_mode);
(enum ice_src_tmr_mode)src_tmr_mode);
ice_cgu_cfg_ts_pll(pf, true, (enum ice_time_ref_freq)time_ref_freq,
(enum ice_cgu_time_ref_sel)time_ref_sel,
(enum ice_src_tmr_mode)src_tmr_mode);
} else { } else {
command_help: command_help:
dev_info(dev, "unknown or invalid command '%s'\n", cmd_buf); dev_info(dev, "unknown or invalid command '%s'\n", cmd_buf);
@ -597,6 +855,8 @@ command_help:
#ifdef ICE_ADD_PROBES #ifdef ICE_ADD_PROBES
dev_info(dev, "\t dump arfs_stats\n"); dev_info(dev, "\t dump arfs_stats\n");
#endif /* ICE_ADD_PROBES */ #endif /* ICE_ADD_PROBES */
if (ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
dev_info(dev, "\t dump rclk_status\n");
ret = -EINVAL; ret = -EINVAL;
goto command_write_done; goto command_write_done;
} }
@ -635,6 +895,13 @@ void ice_debugfs_pf_init(struct ice_pf *pf)
if (!pfile) if (!pfile)
goto create_failed; goto create_failed;
/* Expose external CGU debugfs interface if CGU available */
if (ice_is_feature_supported(pf, ICE_F_CGU)) {
if (!debugfs_create_file("cgu", 0400, pf->ice_debugfs_pf, pf,
&ice_debugfs_cgu_fops))
goto create_failed;
}
return; return;
create_failed: create_failed:

drivers/thirdparty/ice/ice_defs.h vendored Normal file

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_DEFS_H_
#define _ICE_DEFS_H_
#define ICE_BYTES_PER_WORD 2
#define ICE_BYTES_PER_DWORD 4
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_CHNL_MAX_TC 16
#endif /* _ICE_DEFS_H_ */


@ -4,8 +4,8 @@
#ifndef _ICE_DEVIDS_H_ #ifndef _ICE_DEVIDS_H_
#define _ICE_DEVIDS_H_ #define _ICE_DEVIDS_H_
/* Device IDs */ /* Device IDs */
#define ICE_DEV_ID_E822_SI_DFLT 0x1888
/* Intel(R) Ethernet Connection E823-L for backplane */ /* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C #define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */ /* Intel(R) Ethernet Connection E823-L for SFP */
@ -22,6 +22,13 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592 #define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */ /* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593 #define ICE_DEV_ID_E810C_SFP 0x1593
#define ICE_SUBDEV_ID_E810T 0x000E
#define ICE_SUBDEV_ID_E810T2 0x000F
#define ICE_SUBDEV_ID_E810T3 0x02E9
#define ICE_SUBDEV_ID_E810T4 0x02EA
#define ICE_SUBDEV_ID_E810T5 0x0010
#define ICE_SUBDEV_ID_E810T6 0x0012
#define ICE_SUBDEV_ID_E810T7 0x0011
/* Intel(R) Ethernet Controller E810-XXV for backplane */ /* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599 #define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */ /* Intel(R) Ethernet Controller E810-XXV for QSFP */
@ -56,5 +63,12 @@
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899 #define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */ /* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A #define ICE_DEV_ID_E822L_SGMII 0x189A
/* Intel(R) Ethernet Connection E825-C for backplane */
#define ICE_DEV_ID_E825C_BACKPLANE 0x579C
/* Intel(R) Ethernet Connection E825-C for QSFP */
#define ICE_DEV_ID_E825C_QSFP 0x579D
/* Intel(R) Ethernet Connection E825-C for SFP */
#define ICE_DEV_ID_E825C_SFP 0x579E
/* Intel(R) Ethernet Connection E825-C 1GbE */
#define ICE_DEV_ID_E825C_SGMII 0x579F
#endif /* _ICE_DEVIDS_H_ */ #endif /* _ICE_DEVIDS_H_ */
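For illustration, device IDs such as the new E825-C family are typically wired into the driver's pci_device_id table in ice_main.c, which this hunk does not show; a hedged sketch of what such entries would look like:

static const struct pci_device_id ice_e825c_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII) },
	{ /* sentinel marking the end of the table */ }
};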

File diff suppressed because it is too large


@ -7,10 +7,10 @@
#if IS_ENABLED(CONFIG_NET_DEVLINK) #if IS_ENABLED(CONFIG_NET_DEVLINK)
struct ice_pf *ice_allocate_pf(struct device *dev); struct ice_pf *ice_allocate_pf(struct device *dev);
int ice_devlink_register(struct ice_pf *pf); void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf); void ice_devlink_unregister(struct ice_pf *pf);
void ice_devlink_params_publish(struct ice_pf *pf); int ice_devlink_register_params(struct ice_pf *pf);
void ice_devlink_params_unpublish(struct ice_pf *pf); void ice_devlink_unregister_params(struct ice_pf *pf);
int ice_devlink_create_pf_port(struct ice_pf *pf); int ice_devlink_create_pf_port(struct ice_pf *pf);
void ice_devlink_destroy_pf_port(struct ice_pf *pf); void ice_devlink_destroy_pf_port(struct ice_pf *pf);
#ifdef HAVE_DEVLINK_PORT_ATTR_PCI_VF #ifdef HAVE_DEVLINK_PORT_ATTR_PCI_VF
@ -23,10 +23,10 @@ static inline struct ice_pf *ice_allocate_pf(struct device *dev)
return devm_kzalloc(dev, sizeof(struct ice_pf), GFP_KERNEL); return devm_kzalloc(dev, sizeof(struct ice_pf), GFP_KERNEL);
} }
static inline int ice_devlink_register(struct ice_pf *pf) { return 0; } static inline void ice_devlink_register(struct ice_pf *pf) { }
static inline void ice_devlink_unregister(struct ice_pf *pf) { } static inline void ice_devlink_unregister(struct ice_pf *pf) { }
static inline void ice_devlink_params_publish(struct ice_pf *pf) { } static inline int ice_devlink_register_params(struct ice_pf *pf) { return 0; }
static inline void ice_devlink_params_unpublish(struct ice_pf *pf) { } static inline void ice_devlink_unregister_params(struct ice_pf *pf) { }
static inline int ice_devlink_create_pf_port(struct ice_pf *pf) { return 0; } static inline int ice_devlink_create_pf_port(struct ice_pf *pf) { return 0; }
static inline void ice_devlink_destroy_pf_port(struct ice_pf *pf) { } static inline void ice_devlink_destroy_pf_port(struct ice_pf *pf) { }
#ifdef HAVE_DEVLINK_PORT_ATTR_PCI_VF #ifdef HAVE_DEVLINK_PORT_ATTR_PCI_VF
@ -43,4 +43,15 @@ static inline void ice_devlink_init_regions(struct ice_pf *pf) { }
static inline void ice_devlink_destroy_regions(struct ice_pf *pf) { } static inline void ice_devlink_destroy_regions(struct ice_pf *pf) { }
#endif #endif
int ice_devlink_tc_params_register(struct ice_vsi *vsi);
void ice_devlink_tc_params_unregister(struct ice_vsi *vsi);
#ifdef HAVE_DEVLINK_HEALTH
void ice_devlink_init_mdd_reporter(struct ice_pf *pf);
void ice_devlink_destroy_mdd_reporter(struct ice_pf *pf);
void ice_devlink_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src,
u8 pf_num, u16 vf_num, u8 event, u16 queue);
void ice_devlink_clear_after_reset(struct ice_pf *pf);
#endif /* HAVE_DEVLINK_HEALTH */
#endif /* _ICE_DEVLINK_H_ */ #endif /* _ICE_DEVLINK_H_ */


@ -11,6 +11,102 @@
#include "ice_pf_vsi_vlan_ops.h" #include "ice_pf_vsi_vlan_ops.h"
#include "ice_tc_lib.h" #include "ice_tc_lib.h"
/**
* ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC
* @pf: pointer to PF struct
* @vf: pointer to VF struct
* @mac: VF's MAC address
*
* This function adds an advanced rule that forwards packets with the
* VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue.
*/
int
ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
{
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
struct ice_adv_rule_info rule_info = {0};
struct ice_adv_lkup_elem *list;
struct ice_hw *hw = &pf->hw;
const u16 lkups_cnt = 1;
int err;
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
list[0].type = ICE_MAC_OFOS;
ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
rule_info.rx = false;
rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
ctrl_vsi->rxq_map[vf->vf_id];
rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
rule_info.flags_info.act_valid = true;
rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
rule_info.add_dir_lkup = false;
err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
vf->repr->mac_rule);
if (err)
dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d",
vf->vf_id);
else
vf->repr->rule_added = true;
kfree(list);
return err;
}
/**
* ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC
* @vf: pointer to VF struct
*
* This function replays the VF's MAC rule after reset.
*/
void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
{
int err;
if (!ice_is_switchdev_running(vf->pf))
return;
if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
vf->hw_lan_addr.addr);
if (err) {
dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
vf->hw_lan_addr.addr, vf->vf_id, err);
return;
}
vf->num_mac++;
ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
}
}
/**
* ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC
* @vf: pointer to the VF struct
*
* Delete the advanced rule that was used to forward packets with the VF's MAC
* address (src MAC) to the corresponding switchdev ctrl VSI queue.
*/
void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf)
{
if (!ice_is_switchdev_running(vf->pf))
return;
if (!vf->repr->rule_added)
return;
ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule);
vf->repr->rule_added = false;
}
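Taken together, the three helpers above tie the rule's lifetime to the VF. A hypothetical MAC-change path, for instance, would be expected to drop and re-add the rule roughly as follows (sketch only; this caller does not exist in the diff):

/* Hypothetical: re-point the switchdev steering rule at a new MAC */
static int
ice_eswitch_update_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
			       const u8 *new_mac)
{
	/* del is a no-op when no rule was added for this VF */
	ice_eswitch_del_vf_mac_rule(vf);
	return ice_eswitch_add_vf_mac_rule(pf, vf, new_mac);
}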
/** /**
* ice_eswitch_setup_env - configure switchdev HW filters * ice_eswitch_setup_env - configure switchdev HW filters
* @pf: pointer to PF struct * @pf: pointer to PF struct
@ -21,8 +117,8 @@
static int ice_eswitch_setup_env(struct ice_pf *pf) static int ice_eswitch_setup_env(struct ice_pf *pf)
{ {
struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
struct net_device *uplink_netdev = uplink_vsi->netdev;
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
struct ice_port_info *pi = pf->hw.port_info;
struct ice_vsi_vlan_ops *vlan_ops; struct ice_vsi_vlan_ops *vlan_ops;
bool rule_added = false; bool rule_added = false;
@ -33,40 +129,33 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
netif_addr_lock_bh(uplink_netdev);
__dev_uc_unsync(uplink_netdev, NULL);
__dev_mc_unsync(uplink_netdev, NULL);
netif_addr_unlock_bh(uplink_netdev);
if (ice_vsi_add_vlan_zero(uplink_vsi)) if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_def_rx; goto err_def_rx;
if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) { if (!ice_is_vsi_dflt_vsi(uplink_vsi)) {
if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi)) if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx; goto err_def_rx;
rule_added = true; rule_added = true;
} }
if (ice_cfg_dflt_vsi(pi, ctrl_vsi->idx, true, ICE_FLTR_TX))
goto err_def_tx;
if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_uplink; goto err_override_uplink;
if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_control; goto err_override_control;
if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
ICE_FLTR_TX,
ICE_SINGLE_ACT_LB_ENABLE))
goto err_update_action;
return 0; return 0;
err_update_action:
ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control: err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink: err_override_uplink:
ice_cfg_dflt_vsi(pi, ctrl_vsi->idx, false, ICE_FLTR_TX);
err_def_tx:
if (rule_added) if (rule_added)
ice_clear_dflt_vsi(uplink_vsi->vsw); ice_clear_dflt_vsi(uplink_vsi);
err_def_rx: err_def_rx:
ice_fltr_add_mac_and_broadcast(uplink_vsi, ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr, uplink_vsi->port_info->mac.perm_addr,
@ -86,12 +175,10 @@ ice_eswitch_release_env(struct ice_pf *pf)
{ {
struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
struct ice_port_info *pi = pf->hw.port_info;
ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
ice_cfg_dflt_vsi(pi, ctrl_vsi->idx, false, ICE_FLTR_TX); ice_clear_dflt_vsi(uplink_vsi);
ice_clear_dflt_vsi(uplink_vsi->vsw);
ice_fltr_add_mac_and_broadcast(uplink_vsi, ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr, uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI); ICE_FWD_TO_VSI);
@ -124,17 +211,28 @@ ice_eswitch_remap_ring(struct ice_ring *ring, struct ice_q_vector *q_vector,
* will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to
* number of VFs. * number of VFs.
*/ */
static void static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{ {
struct ice_vsi *vsi = pf->switchdev.control_vsi; struct ice_vsi *vsi = pf->switchdev.control_vsi;
int q_id; int q_id;
lockdep_assert_held(&pf->vfs.table_lock);
ice_for_each_txq(vsi, q_id) { ice_for_each_txq(vsi, q_id) {
struct ice_repr *repr = pf->vf[q_id].repr; struct ice_q_vector *q_vector;
struct ice_q_vector *q_vector = repr->q_vector; struct ice_ring *tx_ring;
struct ice_ring *tx_ring = vsi->tx_rings[q_id]; struct ice_ring *rx_ring;
struct ice_ring *rx_ring = vsi->rx_rings[q_id]; struct ice_repr *repr;
struct ice_vf *vf;
vf = ice_get_vf_by_id(pf, q_id);
if (WARN_ON(!vf))
continue;
repr = vf->repr;
q_vector = repr->q_vector;
tx_ring = vsi->tx_rings[q_id];
rx_ring = vsi->rx_rings[q_id];
q_vector->vsi = vsi; q_vector->vsi = vsi;
q_vector->reg_idx = vsi->q_vectors[0]->reg_idx; q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
@ -150,6 +248,38 @@ ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
q_vector->num_ring_rx = 1; q_vector->num_ring_rx = 1;
q_vector->rx.ring = rx_ring; q_vector->rx.ring = rx_ring;
ice_eswitch_remap_ring(rx_ring, q_vector, repr->netdev); ice_eswitch_remap_ring(rx_ring, q_vector, repr->netdev);
ice_put_vf(vf);
}
}
/**
* ice_eswitch_release_reprs - clear PR VSIs configuration
* @pf: pointer to PF struct
* @ctrl_vsi: pointer to switchdev control VSI
*/
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
struct ice_vf *vf;
unsigned int bkt;
lockdep_assert_held(&pf->vfs.table_lock);
ice_for_each_vf(pf, bkt, vf) {
struct ice_vsi *vsi = vf->repr->src_vsi;
/* Skip VFs that aren't configured */
if (!vf->repr->dst)
continue;
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
netif_napi_del(&vf->repr->q_vector->napi);
} }
} }
@ -161,11 +291,13 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{ {
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
int max_vsi_num = 0; int max_vsi_num = 0;
int i; struct ice_vf *vf;
unsigned int bkt;
ice_for_each_vf(pf, i) { lockdep_assert_held(&pf->vfs.table_lock);
struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
struct ice_vf *vf = &pf->vf[i]; ice_for_each_vf(pf, bkt, vf) {
struct ice_vsi *vsi = vf->repr->src_vsi;
ice_remove_vsi_fltr(&pf->hw, vsi->idx); ice_remove_vsi_fltr(&pf->hw, vsi->idx);
vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
@ -182,6 +314,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
vf->hw_lan_addr.addr, vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI); ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst); metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
goto err; goto err;
} }
@ -190,6 +323,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
vf->hw_lan_addr.addr, vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI); ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst); metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
goto err; goto err;
} }
@ -203,20 +337,13 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
netif_keep_dst(vf->repr->netdev); netif_keep_dst(vf->repr->netdev);
} }
kfree(ctrl_vsi->target_netdevs); ice_for_each_vf(pf, bkt, vf) {
ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
sizeof(*ctrl_vsi->target_netdevs),
GFP_KERNEL);
if (!ctrl_vsi->target_netdevs)
goto err;
ice_for_each_vf(pf, i) {
struct ice_repr *repr = pf->vf[i].repr;
struct ice_vsi *vsi = repr->src_vsi;
struct metadata_dst *dst; struct metadata_dst *dst;
struct ice_repr *repr;
struct ice_vsi *vsi;
ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev; repr = vf->repr;
vsi = repr->src_vsi;
dst = repr->dst; dst = repr->dst;
dst->u.port_info.port_id = vsi->vsi_num; dst->u.port_info.port_id = vsi->vsi_num;
@ -227,43 +354,11 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
return 0; return 0;
err: err:
for (i = i - 1; i >= 0; i--) { ice_eswitch_release_reprs(pf, ctrl_vsi);
struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
struct ice_vf *vf = &pf->vf[i];
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
metadata_dst_free(vf->repr->dst);
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
}
return -ENODEV; return -ENODEV;
} }
/**
* ice_eswitch_release_reprs - clear PR VSIs configuration
* @pf: poiner to PF struct
* @ctrl_vsi: pointer to switchdev control VSI
*/
static void ice_eswitch_release_reprs(struct ice_pf *pf,
struct ice_vsi *ctrl_vsi)
{
int i;
kfree(ctrl_vsi->target_netdevs);
ice_for_each_vf(pf, i) {
struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
struct ice_vf *vf = &pf->vf[i];
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
metadata_dst_free(vf->repr->dst);
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
netif_napi_del(&vf->repr->q_vector->napi);
}
}
/** /**
* ice_eswitch_update_repr - reconfigure VF port representor * ice_eswitch_update_repr - reconfigure VF port representor
* @vsi: VF VSI for which port representor is configured * @vsi: VF VSI for which port representor is configured
@ -278,16 +373,17 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
if (!ice_is_switchdev_running(pf)) if (!ice_is_switchdev_running(pf))
return; return;
vf = &pf->vf[vsi->vf_id]; vf = vsi->vf;
repr = vf->repr; repr = vf->repr;
repr->src_vsi = vsi; repr->src_vsi = vsi;
repr->dst->u.port_info.port_id = vsi->vsi_num; repr->dst->u.port_info.port_id = vsi->vsi_num;
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) { if (ret) {
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id); ICE_FWD_TO_VSI);
return; dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
vf->vf_id);
} }
} }
@ -308,7 +404,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
np = netdev_priv(netdev); np = netdev_priv(netdev);
vsi = np->vsi; vsi = np->vsi;
if (ice_is_reset_in_progress(vsi->back->state)) if (ice_is_reset_in_progress(vsi->back->state) ||
test_bit(ICE_VF_DIS, vsi->back->state))
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
repr = ice_netdev_to_repr(netdev); repr = ice_netdev_to_repr(netdev);
@ -325,8 +422,9 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
* @skb: pointer to send buffer * @skb: pointer to send buffer
* @off: pointer to offload struct * @off: pointer to offload struct
*/ */
void ice_eswitch_set_target_vsi(struct sk_buff *skb, void
struct ice_tx_offload_params *off) ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off)
{ {
struct metadata_dst *dst = skb_metadata_dst(skb); struct metadata_dst *dst = skb_metadata_dst(skb);
u64 cd_cmd, dst_vsi; u64 cd_cmd, dst_vsi;
@ -374,20 +472,22 @@ ice_eswitch_port_start_xmit(struct sk_buff __always_unused *skb,
static struct ice_vsi * static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{ {
return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL, 0); return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL, 0);
} }
/** /**
* ice_eswitch_napi_del - remove NAPI handle for all port representors * ice_eswitch_napi_del - remove NAPI handle for all port representors
* @pf: pointer to PF structure * @pf: pointer to PF structure
*/ */
static void ice_eswitch_napi_del(struct ice_pf *pf) static void ice_eswitch_napi_del(struct ice_pf *pf)
{ {
int i; struct ice_vf *vf;
unsigned int bkt;
ice_for_each_vf(pf, i) lockdep_assert_held(&pf->vfs.table_lock);
netif_napi_del(&pf->vf[i].repr->q_vector->napi);
ice_for_each_vf(pf, bkt, vf)
netif_napi_del(&vf->repr->q_vector->napi);
} }
/** /**
@ -396,10 +496,13 @@ static void ice_eswitch_napi_del(struct ice_pf *pf)
*/ */
static void ice_eswitch_napi_enable(struct ice_pf *pf) static void ice_eswitch_napi_enable(struct ice_pf *pf)
{ {
int i; struct ice_vf *vf;
unsigned int bkt;
ice_for_each_vf(pf, i) lockdep_assert_held(&pf->vfs.table_lock);
napi_enable(&pf->vf[i].repr->q_vector->napi);
ice_for_each_vf(pf, bkt, vf)
napi_enable(&vf->repr->q_vector->napi);
} }
/** /**
@ -408,39 +511,25 @@ static void ice_eswitch_napi_enable(struct ice_pf *pf)
*/ */
static void ice_eswitch_napi_disable(struct ice_pf *pf) static void ice_eswitch_napi_disable(struct ice_pf *pf)
{ {
int i; struct ice_vf *vf;
unsigned int bkt;
ice_for_each_vf(pf, i) lockdep_assert_held(&pf->vfs.table_lock);
napi_disable(&pf->vf[i].repr->q_vector->napi);
}
/** ice_for_each_vf(pf, bkt, vf)
* ice_eswitch_set_rxdid - configure rxdid on all rx queues from VSI napi_disable(&vf->repr->q_vector->napi);
* @vsi: vsi to setup rxdid on
* @rxdid: flex descriptor id
*/
static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
{
struct ice_hw *hw = &vsi->back->hw;
int i;
ice_for_each_rxq(vsi, i) {
struct ice_ring *ring = vsi->rx_rings[i];
u16 pf_q = vsi->rxq_map[ring->q_index];
ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
}
} }
/** /**
* ice_eswitch_enable_switchdev - configure eswitch in switchdev mode * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
* @pf: pointer to PF structure * @pf: pointer to PF structure
*/ */
static int static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
ice_eswitch_enable_switchdev(struct ice_pf *pf)
{ {
struct ice_vsi *ctrl_vsi; struct ice_vsi *ctrl_vsi;
lockdep_assert_held(&pf->vfs.table_lock);
pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
if (!pf->switchdev.control_vsi) if (!pf->switchdev.control_vsi)
return -ENODEV; return -ENODEV;
@ -466,8 +555,6 @@ ice_eswitch_enable_switchdev(struct ice_pf *pf)
ice_eswitch_napi_enable(pf); ice_eswitch_napi_enable(pf);
ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
return 0; return 0;
err_setup_reprs: err_setup_reprs:
@ -487,10 +574,13 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{ {
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
lockdep_assert_held(&pf->vfs.table_lock);
ice_eswitch_napi_disable(pf); ice_eswitch_napi_disable(pf);
ice_eswitch_release_env(pf); ice_eswitch_release_env(pf);
ice_vsi_release(ctrl_vsi); ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
ice_eswitch_release_reprs(pf, ctrl_vsi); ice_eswitch_release_reprs(pf, ctrl_vsi);
ice_vsi_release(ctrl_vsi);
ice_repr_rem_from_all_vfs(pf); ice_repr_rem_from_all_vfs(pf);
} }
@@ -502,8 +592,9 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
 * @mode: eswitch mode to switch to
 * @extack: pointer to extack structure
 */
-int ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
-			 struct netlink_ext_ack *extack)
+int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+		     struct netlink_ext_ack *extack)
#else
int ice_eswitch_mode_set(struct devlink *devlink, u16 mode)
#endif /* HAVE_DEVLINK_ESWITCH_OPS_EXTACK */
@@ -513,15 +604,15 @@ int ice_eswitch_mode_set(struct devlink *devlink, u16 mode)
	if (pf->eswitch_mode == mode)
		return 0;

-	if (pf->num_alloc_vfs) {
-		dev_info(ice_pf_to_dev(pf),
-			 "Changing eswitch mode is allowed only if there is no VFs created");
+	if (ice_has_vfs(pf)) {
+		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
		return -EOPNOTSUPP;
	}

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
-		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy", pf->hw.pf_id);
+		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
+			 pf->hw.pf_id);
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
	{
@@ -532,15 +623,15 @@ int ice_eswitch_mode_set(struct devlink *devlink, u16 mode)
	}
#endif /* NETIF_F_HW_TC */

-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
		if (ice_is_offloaded_macvlan_ena(pf)) {
			dev_err(ice_pf_to_dev(pf), "switchdev cannot be configured - L2 Forwarding Offload is currently enabled.\n");
			return -EOPNOTSUPP;
		}
-#endif /* HAVE_NETDEV_SB_DEV */
+#endif /* HAVE_NDO_DFWD_OPS */

-		dev_info(ice_pf_to_dev(pf),
-			 "PF %d changed eswitch mode to switchdev", pf->hw.pf_id);
+		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
+			 pf->hw.pf_id);
		break;
	}
	default:
@@ -557,34 +648,6 @@ int ice_eswitch_mode_set(struct devlink *devlink, u16 mode)
}
#endif /* HAVE_METADATA_PORT_INFO */
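For context, the two callbacks above are the standard devlink eswitch hooks. A minimal sketch of how a driver typically wires them up follows; the ops-struct name here is illustrative, and kcompat builds may drop the extack argument exactly as the #ifdef above shows:

#include <net/devlink.h>

/* illustrative wiring only; the real driver registers its own devlink_ops */
static const struct devlink_ops sketch_devlink_ops = {
	.eswitch_mode_get = ice_eswitch_mode_get,
	.eswitch_mode_set = ice_eswitch_mode_set,
};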
/**
* ice_eswitch_get_target_netdev - return port representor netdev
* @rx_ring: pointer to rx ring
* @rx_desc: pointer to rx descriptor
*
* When working in switchdev mode context (when control vsi is used), this
* function returns netdev of appropriate port representor. For non-switchdev
* context, regular netdev associated with rx ring is returned.
*/
struct net_device *
ice_eswitch_get_target_netdev(struct ice_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc)
{
struct ice_32b_rx_flex_desc_nic_2 *desc;
struct ice_vsi *vsi = rx_ring->vsi;
struct ice_vsi *control_vsi;
u16 target_vsi_id;
control_vsi = vsi->back->switchdev.control_vsi;
if (vsi != control_vsi)
return rx_ring->netdev;
desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
target_vsi_id = le16_to_cpu(desc->src_vsi);
return vsi->target_netdevs[target_vsi_id];
}
/**
 * ice_eswitch_mode_get - get current eswitch mode
 * @devlink: pointer to devlink structure
@@ -616,6 +679,8 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
 */
void ice_eswitch_release(struct ice_pf *pf)
{
+	lockdep_assert_held(&pf->vfs.table_lock);
+
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return;

@@ -631,6 +696,8 @@ int ice_eswitch_configure(struct ice_pf *pf)
{
	int status;

+	lockdep_assert_held(&pf->vfs.table_lock);
+
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
		return 0;

@@ -648,17 +715,17 @@ int ice_eswitch_configure(struct ice_pf *pf)
 */
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
-	struct ice_repr *repr;
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);

	if (test_bit(ICE_DOWN, pf->state))
		return;

-	ice_for_each_vf(pf, i) {
-		repr = pf->vf[i].repr;
-		if (repr)
-			ice_repr_start_tx_queues(repr);
-	}
+	ice_for_each_vf(pf, bkt, vf)
+		if (vf->repr)
+			ice_repr_start_tx_queues(vf->repr);
}

/**
@@ -667,17 +734,17 @@ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
 */
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
-	struct ice_repr *repr;
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);

	if (test_bit(ICE_DOWN, pf->state))
		return;

-	ice_for_each_vf(pf, i) {
-		repr = pf->vf[i].repr;
-		if (repr)
-			ice_repr_stop_tx_queues(repr);
-	}
+	ice_for_each_vf(pf, bkt, vf)
+		if (vf->repr)
+			ice_repr_stop_tx_queues(vf->repr);
}

/**
@@ -711,7 +778,6 @@ int ice_eswitch_rebuild(struct ice_pf *pf)
		return status;

	ice_eswitch_napi_enable(pf);
-	ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
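The loops above switch from indexing a flat pf->vf array to walking a hash table of VF entries with a bucket cursor. A sketch of what such an iterator expands to, assuming the upstream-style VF table layout (hash_for_each() is the standard kernel helper; the member names are taken from the code above and may differ in detail):

#include <linux/hashtable.h>

/* sketch: visit every VF in the PF's VF hash table, bucket by bucket */
#define ice_for_each_vf(pf, bkt, vf) \
	hash_for_each((pf)->vfs.table, (bkt), (vf), entry)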


@@ -11,10 +11,11 @@ int ice_eswitch_configure(struct ice_pf *pf);
int ice_eswitch_rebuild(struct ice_pf *pf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
+int ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
+				const u8 *mac);
+void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf);
+void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf);
-struct net_device *
-ice_eswitch_get_target_netdev(struct ice_ring *rx_ring,
-			      union ice_32b_rx_flex_desc *rx_desc);
#ifdef HAVE_METADATA_PORT_INFO
void ice_eswitch_set_target_vsi(struct sk_buff *skb,
				struct ice_tx_offload_params *off);
@@ -57,6 +58,15 @@ static inline
void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off) { }
static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
+static inline void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) { }
+static inline void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) { }
+static inline int
+ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
+			    const u8 *mac)
+{
+	return 0;
+}
+
static inline int
ice_eswitch_configure(struct ice_pf *pf)
@@ -81,12 +91,5 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	return 0;
}
-static inline struct net_device *
-ice_eswitch_get_target_netdev(struct ice_ring *rx_ring,
-			      union ice_32b_rx_flex_desc *rx_desc)
-{
-	return NULL;
-}
#endif /* CONFIG_NET_DEVLINK */
#endif

File diff suppressed because it is too large

@@ -136,7 +136,7 @@ struct ice_stats {
#define ICE_RXQ_NAPI_POLL PICK("rx_queue_%u_napi_poll_cnt", "rx_q-%u_napi_poll_count")
#endif /* ICE_ADD_PROBES */

-#ifdef HAVE_NETDEV_SB_DEV
+#ifdef HAVE_NDO_DFWD_OPS
#ifdef ICE_ADD_PROBES
/* macvlan stats */
#define L2_FWD_TX_PKTS1 PICK("l2-fwd-%s-tx_pkts", "tx-l2-forward_q-%s_pkts")
@@ -148,7 +148,7 @@ struct ice_stats {
#define L2_FWD_RX_PKTS2 PICK("l2-fwd-%i-rx_pkts", "rx-l2-forward_q-%i_pkts")
#define L2_FWD_RX_BYTES2 PICK("l2-fwd-%i-rx_bytes", "rx-l2-forward_q-%i_bytes")
#endif /* ICE_ADD_PROBES */
-#endif /* HAVE_NETDEV_SB_DEV */
+#endif /* HAVE_NDO_DFWD_OPS */

#ifdef ADQ_PERF_COUNTERS
/* ADQ stats */
@@ -193,4 +193,334 @@ struct ice_stats {
#define ICE_RXQ_KEEP_STATE_BP_BUDGET8 PICK("rx_%u.keep_state_bp_budget8", "rx_q-%u_keep_state_bp_budget8")
#define ICE_RXQ_KEEP_STATE_BP_BUDGET64 PICK("rx_%u.keep_state_bp_budget64", "rx_q-%u_keep_state_bp_budget64")
#endif /* ADQ_PERF_COUNTERS */
/* PTP stats */
#define ICE_TX_HWTSTAMP_SKIPPED "tx_hwtstamp_skipped"
#define ICE_TX_HWTSTAMP_TIMEOUTS "tx_hwtstamp_timeouts"
#define ICE_TX_HWTSTAMP_FLUSHED "tx_hwtstamp_flushed"
#define ICE_TX_HWTSTAMP_DISCARDED "tx_hwtstamp_discarded"
#define ICE_LATE_CACHED_PHC_UPDATES "late_cached_phc_updates"
struct ice_phy_type_to_ethtool {
u64 aq_link_speed;
enum ethtool_link_mode_bit_indices link_mode;
bool ethtool_link_mode_supported;
u8 phy_type_idx;
};
/* Macro to make PHY type to ethtool link mode table entry.
* The index is the PHY type.
*/
#define ICE_PHY_TYPE(PHY_TYPE_IDX, LINK_SPEED, ETHTOOL_LINK_MODE) \
{ ICE_AQ_LINK_SPEED_ ## LINK_SPEED, \
ETHTOOL_LINK_MODE_ ## ETHTOOL_LINK_MODE ## _BIT, \
true, \
PHY_TYPE_IDX }
/* PHY types that do not have a supported ethtool link mode are initialized as:
 * { ICE_AQ_LINK_SPEED_UNKNOWN, 0, false, PHY_TYPE_IDX }
 */
#define ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(PHY_TYPE_IDX) \
{ ICE_AQ_LINK_SPEED_UNKNOWN, \
(enum ethtool_link_mode_bit_indices)0, \
false, \
PHY_TYPE_IDX }
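To make the table below easier to read, here is what one entry expands to once the token pasting in ICE_PHY_TYPE() is applied (the variable name is illustrative):

/* ICE_PHY_TYPE(0, 100MB, 100baseT_Full) pastes its arguments into: */
static const struct ice_phy_type_to_ethtool example_entry = {
	.aq_link_speed = ICE_AQ_LINK_SPEED_100MB,
	.link_mode = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
	.ethtool_link_mode_supported = true,
	.phy_type_idx = 0,
};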
#define ICE_PHY_TYPE_LOW_SIZE (ICE_PHY_TYPE_LOW_MAX_INDEX + 1)
/* Lookup table mapping PHY type low to link speed and ethtool link modes */
static
struct ice_phy_type_to_ethtool phy_type_low_lkup[ICE_PHY_TYPE_LOW_SIZE] = {
/* ICE_PHY_TYPE_LOW_100BASE_TX */
ICE_PHY_TYPE(0, 100MB, 100baseT_Full),
/* ICE_PHY_TYPE_LOW_100M_SGMII */
ICE_PHY_TYPE(1, 100MB, 100baseT_Full),
/* ICE_PHY_TYPE_LOW_1000BASE_T */
ICE_PHY_TYPE(2, 1000MB, 1000baseT_Full),
#ifdef HAVE_ETHTOOL_NEW_1G_BITS
/* ICE_PHY_TYPE_LOW_1000BASE_SX */
ICE_PHY_TYPE(3, 1000MB, 1000baseX_Full),
/* ICE_PHY_TYPE_LOW_1000BASE_LX */
ICE_PHY_TYPE(4, 1000MB, 1000baseX_Full),
#else
/* ICE_PHY_TYPE_LOW_1000BASE_SX */
ICE_PHY_TYPE(3, 1000MB, 1000baseT_Full),
/* ICE_PHY_TYPE_LOW_1000BASE_LX */
ICE_PHY_TYPE(4, 1000MB, 1000baseT_Full),
#endif /* HAVE_ETHTOOL_NEW_1G_BITS */
/* ICE_PHY_TYPE_LOW_1000BASE_KX */
ICE_PHY_TYPE(5, 1000MB, 1000baseKX_Full),
/* ICE_PHY_TYPE_LOW_1G_SGMII */
ICE_PHY_TYPE(6, 1000MB, 1000baseT_Full),
#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS
/* ICE_PHY_TYPE_LOW_2500BASE_T */
ICE_PHY_TYPE(7, 2500MB, 2500baseT_Full),
#else
/* ICE_PHY_TYPE_LOW_2500BASE_T */
ICE_PHY_TYPE(7, 2500MB, 2500baseX_Full),
#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */
/* ICE_PHY_TYPE_LOW_2500BASE_X */
ICE_PHY_TYPE(8, 2500MB, 2500baseX_Full),
/* ICE_PHY_TYPE_LOW_2500BASE_KX */
ICE_PHY_TYPE(9, 2500MB, 2500baseX_Full),
#ifdef HAVE_ETHTOOL_5G_BITS
/* ICE_PHY_TYPE_LOW_5GBASE_T */
ICE_PHY_TYPE(10, 5GB, 5000baseT_Full),
/* ICE_PHY_TYPE_LOW_5GBASE_KR */
ICE_PHY_TYPE(11, 5GB, 5000baseT_Full),
#else /* HAVE_ETHTOOL_5G_BITS */
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(10),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(11),
#endif /* HAVE_ETHTOOL_5G_BITS */
/* ICE_PHY_TYPE_LOW_10GBASE_T */
ICE_PHY_TYPE(12, 10GB, 10000baseT_Full),
#ifdef HAVE_ETHTOOL_NEW_10G_BITS
/* ICE_PHY_TYPE_LOW_10G_SFI_DA */
ICE_PHY_TYPE(13, 10GB, 10000baseCR_Full),
/* ICE_PHY_TYPE_LOW_10GBASE_SR */
ICE_PHY_TYPE(14, 10GB, 10000baseSR_Full),
/* ICE_PHY_TYPE_LOW_10GBASE_LR */
ICE_PHY_TYPE(15, 10GB, 10000baseLR_Full),
#else
/* ICE_PHY_TYPE_LOW_10G_SFI_DA */
ICE_PHY_TYPE(13, 10GB, 10000baseT_Full),
/* ICE_PHY_TYPE_LOW_10GBASE_SR */
ICE_PHY_TYPE(14, 10GB, 10000baseT_Full),
/* ICE_PHY_TYPE_LOW_10GBASE_LR */
ICE_PHY_TYPE(15, 10GB, 10000baseT_Full),
#endif /* HAVE_ETHTOOL_NEW_10G_BITS */
/* ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 */
ICE_PHY_TYPE(16, 10GB, 10000baseKR_Full),
#ifdef HAVE_ETHTOOL_NEW_10G_BITS
/* ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC */
ICE_PHY_TYPE(17, 10GB, 10000baseCR_Full),
#else
/* ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC */
ICE_PHY_TYPE(17, 10GB, 10000baseT_Full),
#endif
/* ICE_PHY_TYPE_LOW_10G_SFI_C2C */
ICE_PHY_TYPE(18, 10GB, 10000baseKR_Full),
#ifdef HAVE_ETHTOOL_25G_BITS
/* ICE_PHY_TYPE_LOW_25GBASE_T */
ICE_PHY_TYPE(19, 25GB, 25000baseCR_Full),
/* ICE_PHY_TYPE_LOW_25GBASE_CR */
ICE_PHY_TYPE(20, 25GB, 25000baseCR_Full),
/* ICE_PHY_TYPE_LOW_25GBASE_CR_S */
ICE_PHY_TYPE(21, 25GB, 25000baseCR_Full),
/* ICE_PHY_TYPE_LOW_25GBASE_CR1 */
ICE_PHY_TYPE(22, 25GB, 25000baseCR_Full),
/* ICE_PHY_TYPE_LOW_25GBASE_SR */
ICE_PHY_TYPE(23, 25GB, 25000baseSR_Full),
/* ICE_PHY_TYPE_LOW_25GBASE_LR */
ICE_PHY_TYPE(24, 25GB, 25000baseSR_Full),
/* ICE_PHY_TYPE_LOW_25GBASE_KR */
ICE_PHY_TYPE(25, 25GB, 25000baseKR_Full),
/* ICE_PHY_TYPE_LOW_25GBASE_KR_S */
ICE_PHY_TYPE(26, 25GB, 25000baseKR_Full),
/* ICE_PHY_TYPE_LOW_25GBASE_KR1 */
ICE_PHY_TYPE(27, 25GB, 25000baseKR_Full),
/* ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC */
ICE_PHY_TYPE(28, 25GB, 25000baseSR_Full),
/* ICE_PHY_TYPE_LOW_25G_AUI_C2C */
ICE_PHY_TYPE(29, 25GB, 25000baseCR_Full),
#else /* HAVE_ETHTOOL_25G_BITS */
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(19),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(20),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(21),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(22),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(23),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(24),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(25),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(26),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(27),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(28),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(29),
#endif /* HAVE_ETHTOOL_25G_BITS */
/* ICE_PHY_TYPE_LOW_40GBASE_CR4 */
ICE_PHY_TYPE(30, 40GB, 40000baseCR4_Full),
/* ICE_PHY_TYPE_LOW_40GBASE_SR4 */
ICE_PHY_TYPE(31, 40GB, 40000baseSR4_Full),
/* ICE_PHY_TYPE_LOW_40GBASE_LR4 */
ICE_PHY_TYPE(32, 40GB, 40000baseLR4_Full),
/* ICE_PHY_TYPE_LOW_40GBASE_KR4 */
ICE_PHY_TYPE(33, 40GB, 40000baseKR4_Full),
/* ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC */
ICE_PHY_TYPE(34, 40GB, 40000baseSR4_Full),
/* ICE_PHY_TYPE_LOW_40G_XLAUI */
ICE_PHY_TYPE(35, 40GB, 40000baseCR4_Full),
#ifdef HAVE_ETHTOOL_50G_BITS
/* ICE_PHY_TYPE_LOW_50GBASE_CR2 */
ICE_PHY_TYPE(36, 50GB, 50000baseCR2_Full),
#else
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(36),
#endif /* HAVE_ETHTOOL_50G_BITS */
#ifdef HAVE_ETHTOOL_NEW_50G_BITS
/* ICE_PHY_TYPE_LOW_50GBASE_SR2 */
ICE_PHY_TYPE(37, 50GB, 50000baseSR2_Full),
/* ICE_PHY_TYPE_LOW_50GBASE_LR2 */
ICE_PHY_TYPE(38, 50GB, 50000baseSR2_Full),
#else
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(37),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(38),
#endif /* HAVE_ETHTOOL_NEW_50G_BITS */
#ifdef HAVE_ETHTOOL_50G_BITS
/* ICE_PHY_TYPE_LOW_50GBASE_KR2 */
ICE_PHY_TYPE(39, 50GB, 50000baseKR2_Full),
#ifdef HAVE_ETHTOOL_NEW_50G_BITS
/* ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC */
ICE_PHY_TYPE(40, 50GB, 50000baseSR2_Full),
#else
/* ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC */
ICE_PHY_TYPE(40, 50GB, 50000baseCR2_Full),
#endif /* HAVE_ETHTOOL_NEW_50G_BITS */
/* ICE_PHY_TYPE_LOW_50G_LAUI2 */
ICE_PHY_TYPE(41, 50GB, 50000baseCR2_Full),
#ifdef HAVE_ETHTOOL_NEW_50G_BITS
/* ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC */
ICE_PHY_TYPE(42, 50GB, 50000baseSR2_Full),
#else
/* ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC */
ICE_PHY_TYPE(42, 50GB, 50000baseCR2_Full),
#endif
/* ICE_PHY_TYPE_LOW_50G_AUI2 */
ICE_PHY_TYPE(43, 50GB, 50000baseCR2_Full),
#ifdef HAVE_ETHTOOL_200G_BITS
/* ICE_PHY_TYPE_LOW_50GBASE_CP */
ICE_PHY_TYPE(44, 50GB, 50000baseCR_Full),
/* ICE_PHY_TYPE_LOW_50GBASE_SR */
ICE_PHY_TYPE(45, 50GB, 50000baseSR_Full),
#else
/* ICE_PHY_TYPE_LOW_50GBASE_CP */
ICE_PHY_TYPE(44, 50GB, 50000baseCR2_Full),
/* ICE_PHY_TYPE_LOW_50GBASE_SR */
ICE_PHY_TYPE(45, 50GB, 50000baseCR2_Full),
#endif /* HAVE_ETHTOOL_200G_BITS */
#else /* HAVE_ETHTOOL_50G_BITS */
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(39),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(40),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(41),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(42),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(43),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(44),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(45),
#endif /* HAVE_ETHTOOL_50G_BITS */
#ifdef HAVE_ETHTOOL_NEW_50G_BITS
#ifdef HAVE_ETHTOOL_200G_BITS
/* ICE_PHY_TYPE_LOW_50GBASE_FR */
ICE_PHY_TYPE(46, 50GB, 50000baseLR_ER_FR_Full),
/* ICE_PHY_TYPE_LOW_50GBASE_LR */
ICE_PHY_TYPE(47, 50GB, 50000baseLR_ER_FR_Full),
#else
/* ICE_PHY_TYPE_LOW_50GBASE_FR */
ICE_PHY_TYPE(46, 50GB, 50000baseSR2_Full),
/* ICE_PHY_TYPE_LOW_50GBASE_LR */
ICE_PHY_TYPE(47, 50GB, 50000baseSR2_Full),
#endif /* HAVE_ETHTOOL_200G_BITS */
#else
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(46),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(47),
#endif /* HAVE_ETHTOOL_NEW_50G_BITS */
#ifdef HAVE_ETHTOOL_50G_BITS
#ifdef HAVE_ETHTOOL_200G_BITS
/* ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 */
ICE_PHY_TYPE(48, 50GB, 50000baseKR_Full),
/* ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC */
ICE_PHY_TYPE(49, 50GB, 50000baseSR_Full),
/* ICE_PHY_TYPE_LOW_50G_AUI1 */
ICE_PHY_TYPE(50, 50GB, 50000baseCR_Full),
#else
/* ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 */
ICE_PHY_TYPE(48, 50GB, 50000baseKR2_Full),
/* ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC */
ICE_PHY_TYPE(49, 50GB, 50000baseCR2_Full),
/* ICE_PHY_TYPE_LOW_50G_AUI1 */
ICE_PHY_TYPE(50, 50GB, 50000baseCR2_Full),
#endif /* HAVE_ETHTOOL_200G_BITS */
#else
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(48),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(49),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(50),
#endif /* HAVE_ETHTOOL_50G_BITS */
#ifdef HAVE_ETHTOOL_100G_BITS
/* ICE_PHY_TYPE_LOW_100GBASE_CR4 */
ICE_PHY_TYPE(51, 100GB, 100000baseCR4_Full),
/* ICE_PHY_TYPE_LOW_100GBASE_SR4 */
ICE_PHY_TYPE(52, 100GB, 100000baseSR4_Full),
/* ICE_PHY_TYPE_LOW_100GBASE_LR4 */
ICE_PHY_TYPE(53, 100GB, 100000baseLR4_ER4_Full),
/* ICE_PHY_TYPE_LOW_100GBASE_KR4 */
ICE_PHY_TYPE(54, 100GB, 100000baseKR4_Full),
/* ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC */
ICE_PHY_TYPE(55, 100GB, 100000baseCR4_Full),
/* ICE_PHY_TYPE_LOW_100G_CAUI4 */
ICE_PHY_TYPE(56, 100GB, 100000baseCR4_Full),
/* ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC */
ICE_PHY_TYPE(57, 100GB, 100000baseSR4_Full),
/* ICE_PHY_TYPE_LOW_100G_AUI4 */
ICE_PHY_TYPE(58, 100GB, 100000baseCR4_Full),
/* ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 */
ICE_PHY_TYPE(59, 100GB, 100000baseCR4_Full),
/* ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 */
ICE_PHY_TYPE(60, 100GB, 100000baseKR4_Full),
#ifdef HAVE_ETHTOOL_NEW_100G_BITS
/* ICE_PHY_TYPE_LOW_100GBASE_CP2 */
ICE_PHY_TYPE(61, 100GB, 100000baseCR2_Full),
/* ICE_PHY_TYPE_LOW_100GBASE_SR2 */
ICE_PHY_TYPE(62, 100GB, 100000baseSR2_Full),
#else
/* ICE_PHY_TYPE_LOW_100GBASE_CP2 */
ICE_PHY_TYPE(61, 100GB, 100000baseCR4_Full),
/* ICE_PHY_TYPE_LOW_100GBASE_SR2 */
ICE_PHY_TYPE(62, 100GB, 100000baseSR4_Full),
#endif /* HAVE_ETHTOOL_NEW_100G_BITS */
/* ICE_PHY_TYPE_LOW_100GBASE_DR */
ICE_PHY_TYPE(63, 100GB, 100000baseLR4_ER4_Full),
#else /* HAVE_ETHTOOL_100G_BITS */
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(51),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(52),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(53),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(54),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(55),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(56),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(57),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(58),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(59),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(60),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(61),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(62),
ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(63),
#endif /* HAVE_ETHTOOL_100G_BITS */
};
#ifdef HAVE_ETHTOOL_100G_BITS
#define ICE_PHY_TYPE_HIGH_SIZE (ICE_PHY_TYPE_HIGH_MAX_INDEX + 1)
/* Lookup table mapping PHY type high to link speed and ethtool link modes */
static
struct ice_phy_type_to_ethtool phy_type_high_lkup[ICE_PHY_TYPE_HIGH_SIZE] = {
#ifdef HAVE_ETHTOOL_NEW_100G_BITS
/* ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 */
ICE_PHY_TYPE(0, 100GB, 100000baseKR2_Full),
/* ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC */
ICE_PHY_TYPE(1, 100GB, 100000baseSR2_Full),
/* ICE_PHY_TYPE_HIGH_100G_CAUI2 */
ICE_PHY_TYPE(2, 100GB, 100000baseCR2_Full),
/* ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC */
ICE_PHY_TYPE(3, 100GB, 100000baseSR2_Full),
/* ICE_PHY_TYPE_HIGH_100G_AUI2 */
ICE_PHY_TYPE(4, 100GB, 100000baseCR2_Full),
#else
/* ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 */
ICE_PHY_TYPE(0, 100GB, 100000baseKR4_Full),
/* ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC */
ICE_PHY_TYPE(1, 100GB, 100000baseCR4_Full),
/* ICE_PHY_TYPE_HIGH_100G_CAUI2 */
ICE_PHY_TYPE(2, 100GB, 100000baseCR4_Full),
/* ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC */
ICE_PHY_TYPE(3, 100GB, 100000baseCR4_Full),
/* ICE_PHY_TYPE_HIGH_100G_AUI2 */
ICE_PHY_TYPE(4, 100GB, 100000baseCR4_Full),
#endif /* HAVE_ETHTOOL_NEW_100G_BITS */
};
#endif /* HAVE_ETHTOOL_100G_BITS */
#endif /* !_ICE_ETHTOOL_H_ */
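A sketch of how a lookup table like this is typically consumed when reporting supported link modes through ethtool; the function name is illustrative, while linkmode_set_bit() and BIT_ULL() are the standard kernel helpers:

#include <linux/linkmode.h>

/* sketch: translate a PHY-type-low bitmap into ethtool link mode bits */
static void sketch_phy_types_to_linkmodes(u64 phy_types_low,
					  unsigned long *modes)
{
	int i;

	for (i = 0; i < ICE_PHY_TYPE_LOW_SIZE; i++) {
		if (!(phy_types_low & BIT_ULL(i)))
			continue;
		if (phy_type_low_lkup[i].ethtool_link_mode_supported)
			linkmode_set_bit(phy_type_low_lkup[i].link_mode,
					 modes);
	}
}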


@@ -216,7 +216,6 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
	else
		fsp->ring_cookie = rule->orig_q_index;
-
	idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
	if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
		dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
@@ -308,13 +307,13 @@ ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx)
}

/**
- * ice_fdir_rem_adq_chnl - remove a ADQ channel from HW filter rules
+ * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules
 * @hw: hardware structure containing filter list
 * @vsi_idx: VSI handle
 */
void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
{
-	enum ice_status status;
+	int status;
	int flow;

	if (!hw->fdir_prof)
@@ -324,7 +323,7 @@ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
		struct ice_fd_hw_prof *prof = hw->fdir_prof[flow];
		int tun, i;

-		if (!prof)
+		if (!prof || !prof->cnt)
			continue;

		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
@@ -349,8 +348,7 @@ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
		 */
		status = ice_flow_rem_vsi_prof(hw, ICE_BLK_FD, vsi_idx, prof_id);
		if (status) {
-			dev_err(ice_hw_to_dev(hw),
-				"ice_flow_rem_vsi_prof() failed status=%d\n",
+			dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n",
				status);
		}
	}
@@ -662,7 +660,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_hw *hw = &pf->hw;
-	enum ice_status status;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
#ifdef NETIF_F_HW_TC
@@ -718,24 +715,22 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
	 * actions (NULL) and zero actions 0.
	 */
	prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
-	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
-				   TNL_SEG_CNT(tun), NULL, 0, &prof);
-	if (status)
-		return ice_status_to_errno(status);
-	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
-				    main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
-				    seg, NULL, 0, &entry1_h);
-	if (status) {
-		err = ice_status_to_errno(status);
+	err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+				TNL_SEG_CNT(tun), NULL, 0, &prof);
+	if (err)
+		return err;
+
+	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+				 main_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, NULL,
+				 0, &entry1_h);
+	if (err)
		goto err_prof;
-	}
-	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
-				    ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
-				    seg, NULL, 0, &entry2_h);
-	if (status) {
-		err = ice_status_to_errno(status);
+
+	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, NULL,
+				 0, &entry2_h);
+	if (err)
		goto err_entry;
-	}

	hw_prof->fdir_seg[tun] = seg;
	hw_prof->entry_h[0][tun] = entry1_h;
@@ -755,11 +750,10 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
		entry1_h = 0;
		vsi_h = main_vsi->tc_map_vsi[idx]->idx;
-		status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
-					    main_vsi->idx, vsi_h,
-					    ICE_FLOW_PRIO_NORMAL, seg, NULL, 0,
-					    &entry1_h);
-		if (status) {
+		err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+					 vsi_h, ICE_FLOW_PRIO_NORMAL, seg, NULL,
+					 0, &entry1_h);
+		if (err) {
			dev_err(dev, "Could not add Channel VSI %d to flow group\n",
				idx);
			goto err_unroll;
@@ -906,7 +900,7 @@ ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
	if (!seg)
		return -ENOMEM;

-	tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+	tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
@@ -1340,7 +1334,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
	if (!seg)
		return -ENOMEM;

-	tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+	tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
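Both hunks above replace devm_kzalloc() with an open-coded size multiplication by devm_kcalloc(). The motivation is the usual one: the *calloc form checks the multiplication for overflow and returns NULL rather than allocating a short buffer. A minimal illustrative sketch (the function name is hypothetical):

static u32 *sketch_alloc_table(struct device *dev, size_t count)
{
	/* devm_kcalloc() verifies count * sizeof(u32) cannot overflow
	 * and returns NULL on failure instead of a truncated buffer
	 */
	return devm_kcalloc(dev, count, sizeof(u32), GFP_KERNEL);
}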
@@ -1489,7 +1483,6 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
	struct ice_hw *hw = &pf->hw;
	struct ice_fltr_desc desc;
	struct ice_vsi *ctrl_vsi;
-	enum ice_status status;
	u8 *pkt, *frag_pkt;
	bool has_frag;
	int err;
@@ -1508,11 +1501,10 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
	}

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
-	status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
-	if (status) {
-		err = ice_status_to_errno(status);
+	err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+	if (err)
		goto err_free_all;
-	}
+
	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (err)
		goto err_free_all;
@@ -1522,12 +1514,11 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
	if (has_frag) {
		/* does not return error */
		ice_fdir_get_prgm_desc(hw, input, &desc, add);
-		status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
-						   is_tun);
-		if (status) {
-			err = ice_status_to_errno(status);
+		err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
+						is_tun);
+		if (err)
			goto err_frag;
-		}
+
		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
		if (err)
			goto err_frag;
@@ -1623,6 +1614,28 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf)
	return err;
}
/**
* ice_fdir_del_all_fltrs - Delete all flow director filters
* @vsi: the VSI being changed
*
* This function needs to be called while holding hw->fdir_fltr_lock
*/
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi)
{
struct ice_fdir_fltr *f_rule, *tmp;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
if (!f_rule->acl_fltr)
ice_fdir_write_all_fltr(pf, f_rule, false);
ice_fdir_update_cntrs(hw, f_rule->flow_type, f_rule->acl_fltr,
false);
list_del(&f_rule->fltr_node);
devm_kfree(ice_pf_to_dev(pf), f_rule);
}
}
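The kernel-doc above makes the locking contract explicit; a minimal call-site sketch under that assumption (the wrapper name is hypothetical):

static void sketch_flush_fdir(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->back->hw;

	mutex_lock(&hw->fdir_fltr_lock);	/* contract from the kernel-doc */
	ice_fdir_del_all_fltrs(vsi);
	mutex_unlock(&hw->fdir_fltr_lock);
}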
/**
 * ice_vsi_manage_fdir - turn on/off flow director
 * @vsi: the VSI being changed
@@ -1630,7 +1643,6 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf)
 */
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
{
-	struct ice_fdir_fltr *f_rule, *tmp;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_fltr_ptype flow;
@@ -1644,14 +1656,8 @@ void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
	mutex_lock(&hw->fdir_fltr_lock);
	if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
		goto release_lock;
-	list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
-		if (!f_rule->acl_fltr)
-			ice_fdir_write_all_fltr(pf, f_rule, false);
-		ice_fdir_update_cntrs(hw, f_rule->flow_type, f_rule->acl_fltr,
-				      false);
-		list_del(&f_rule->fltr_node);
-		devm_kfree(ice_pf_to_dev(pf), f_rule);
-	}
+
+	ice_fdir_del_all_fltrs(vsi);

	if (hw->fdir_prof)
		for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
@@ -1682,7 +1688,7 @@ ice_del_acl_ethtool(struct ice_hw *hw, struct ice_fdir_fltr *fltr)
	u64 entry;

	entry = ice_flow_find_entry(hw, ICE_BLK_ACL, fltr->fltr_id);
-	return ice_status_to_errno(ice_flow_rem_entry(hw, ICE_BLK_ACL, entry));
+	return ice_flow_rem_entry(hw, ICE_BLK_ACL, entry);
}

/**
@@ -1842,7 +1848,7 @@ ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring)
		 * specified
		 */
		if ((*ring < ch->base_q) ||
-		    (*ring > (ch->base_q + ch->num_rxq)))
+		    (*ring >= (ch->base_q + ch->num_rxq)))
			continue;

		/* update the dest_vsi based on channel */
@@ -1937,6 +1943,9 @@ ice_ntuple_set_input_set(struct ice_vsi *vsi, enum ice_block blk,
	else
		return -EINVAL;

+	/* zero input so filter comparisons are safer */
+	memset(input, 0, sizeof(struct ice_fdir_fltr));
+
	pf = vsi->back;
	hw = &pf->hw;

@@ -2095,7 +2104,6 @@ int ice_add_ntuple_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;
-
	/* Do not program filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "Device is resetting - adding ntuple filters not supported during reset\n");

File diff suppressed because it is too large

@@ -4,11 +4,22 @@
#ifndef _ICE_FDIR_H_
#define _ICE_FDIR_H_

-#include "ice_common.h"
+#include "ice_type.h"

#define ICE_FDIR_GTPU_IP_INNER_PKT_OFF 50
#define ICE_FDIR_GTPU_EH_INNER_PKT_OFF 58
+#define ICE_FDIR_IPV4_GRE_INNER_PKT_OFF 38
+#define ICE_FDIR_IPV6_GRE_INNER_PKT_OFF 58
+#define ICE_FDIR_V4_V4_GTPOGRE_PKT_OFF 74
+#define ICE_FDIR_V4_V6_GTPOGRE_PKT_OFF 94
+#define ICE_FDIR_V6_V4_GTPOGRE_PKT_OFF 94
+#define ICE_FDIR_V6_V6_GTPOGRE_PKT_OFF 114
+#define ICE_FDIR_V4_V4_GTPOGRE_EH_PKT_OFF 82
+#define ICE_FDIR_V4_V6_GTPOGRE_EH_PKT_OFF 102
+#define ICE_FDIR_V6_V4_GTPOGRE_EH_PKT_OFF 102
+#define ICE_FDIR_V6_V6_GTPOGRE_EH_PKT_OFF 122
+#define ICE_FDIR_IPV4_L2TPV2_PPP_PKT_OFF 52
+#define ICE_FDIR_IPV6_L2TPV2_PPP_PKT_OFF 72

#define ICE_FDIR_TUN_PKT_OFF 50
#define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF)
@@ -64,6 +75,10 @@
#define ICE_IPV4_GTPU_QFI_OFFSET 56
#define ICE_IPV6_GTPU_TEID_OFFSET 66
#define ICE_IPV6_GTPU_QFI_OFFSET 76
+#define ICE_IPV4_GTPOGRE_TEID_OFFSET 70
+#define ICE_IPV4_GTPOGRE_QFI_OFFSET 80
+#define ICE_IPV6_GTPOGRE_TEID_OFFSET 90
+#define ICE_IPV6_GTPOGRE_QFI_OFFSET 100
#define ICE_IPV4_L2TPV3_SESS_ID_OFFSET 34
#define ICE_IPV6_L2TPV3_SESS_ID_OFFSET 54
#define ICE_IPV4_ESP_SPI_OFFSET 34
@@ -72,9 +87,13 @@
#define ICE_IPV6_AH_SPI_OFFSET 58
#define ICE_IPV4_NAT_T_ESP_SPI_OFFSET 42
#define ICE_IPV6_NAT_T_ESP_SPI_OFFSET 62
-#define ICE_IPV4_VXLAN_VNI_OFFSET 45
+#define ICE_IPV4_VXLAN_VNI_OFFSET 46
#define ICE_ECPRI_TP0_PC_ID_OFFSET 18
#define ICE_IPV4_UDP_ECPRI_TP0_PC_ID_OFFSET 46
+#define ICE_IPV4_L2TPV2_SESS_ID_OFFSET 46
+#define ICE_IPV6_L2TPV2_SESS_ID_OFFSET 66
+#define ICE_IPV4_L2TPV2_LEN_SESS_ID_OFFSET 48
+#define ICE_IPV6_L2TPV2_LEN_SESS_ID_OFFSET 68

#define ICE_FDIR_MAX_FLTRS 16384
@@ -201,6 +220,16 @@ struct ice_fdir_ecpri {
	__be16 pc_id;
};

+struct ice_fdir_l2tpv2 {
+	__be16 flags_version;
+	__be16 length;
+	__be16 tunnel_id;
+	__be16 session_id;
+	__be16 ns;
+	__be16 nr;
+	__be16 offset_size;
+};
+
struct ice_fdir_extra {
	u8 dst_mac[ETH_ALEN];	/* dest MAC address */
	u8 src_mac[ETH_ALEN];	/* src MAC address */
@@ -240,6 +269,9 @@ struct ice_fdir_fltr {
	struct ice_fdir_ecpri ecpri_data;
	struct ice_fdir_ecpri ecpri_mask;

+	struct ice_fdir_l2tpv2 l2tpv2_data;
+	struct ice_fdir_l2tpv2 l2tpv2_mask;
+
	struct ice_fdir_extra ext_data;
	struct ice_fdir_extra ext_mask;

@@ -274,29 +306,34 @@ struct ice_fdir_base_pkt {
	const u8 *tun_pkt;
};

-enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
-enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
+bool
+ice_fdir_comp_rules_basic(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b);
+bool
+ice_fdir_comp_rules_extended(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b);
+int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
+int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
void
ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx,
		    struct ice_fltr_desc *fdir_desc);
void ice_set_dflt_val_fd_desc(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx);
-enum ice_status
+int
ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
-enum ice_status
+int
ice_free_fd_guar_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr);
-enum ice_status
+int
ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
-enum ice_status
+int
ice_free_fd_shrd_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr);
-enum ice_status ice_clear_vsi_fd_table(struct ice_hw *hw, u16 vsi_num);
-enum ice_status ice_clear_pf_fd_table(struct ice_hw *hw);
+int ice_clear_vsi_fd_table(struct ice_hw *hw, u16 vsi_num);
+int ice_clear_pf_fd_table(struct ice_hw *hw);
void
ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
		       struct ice_fltr_desc *fdesc, bool add);
-enum ice_status
+int
ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
			  u8 *pkt, bool frag, bool tun);
-enum ice_status
+int
ice_fdir_get_prgm_pkt(struct ice_fdir_fltr *input, u8 *pkt, bool frag);
int ice_get_fdir_cnt_all(struct ice_hw *hw);
bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
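The wholesale enum ice_status → int conversion above removes a translation layer at every call site. A before/after sketch of the calling pattern this enables, using one of the functions declared above as the example:

/* before: driver-private status codes had to be converted to errnos */
enum ice_status status = ice_clear_pf_fd_table(hw);
if (status)
	return ice_status_to_errno(status);

/* after: the same call returns a standard negative errno directly */
int err = ice_clear_pf_fd_table(hw);
if (err)
	return err;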

File diff suppressed because it is too large

@@ -6,25 +6,10 @@
#include "ice_type.h"

-/* Package minimal version supported */
-#define ICE_PKG_SUPP_VER_MAJ 1
-#define ICE_PKG_SUPP_VER_MNR 3
-
-/* Package format version */
-#define ICE_PKG_FMT_VER_MAJ 1
-#define ICE_PKG_FMT_VER_MNR 0
-#define ICE_PKG_FMT_VER_UPD 0
-#define ICE_PKG_FMT_VER_DFT 0
-
-#define ICE_PKG_CNT 4
-
-enum ice_status
-ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
-void ice_release_change_lock(struct ice_hw *hw);
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
		  u8 *prot, u16 *off);
-enum ice_status
+int
ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
		     u16 *value);
void
@@ -32,81 +17,86 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
		     unsigned long *bm);
void
ice_init_prof_result_bm(struct ice_hw *hw);
-enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
-		   unsigned long *bm, struct list_head *fv_list);
-enum ice_status
-ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
-u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
-enum ice_status
+int
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		      u16 buf_size, struct ice_sq_cd *cd);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
			 u16 *port);
-enum ice_status
+int
ice_is_create_tunnel_possible(struct ice_hw *hw, enum ice_tunnel_type type,
			      u16 port);
bool ice_is_tunnel_empty(struct ice_hw *hw);
-enum ice_status
+int
ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
-enum ice_status ice_set_dvm_boost_entries(struct ice_hw *hw);
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
+int ice_set_dvm_boost_entries(struct ice_hw *hw);
+int ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
bool
ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type);
-enum ice_status ice_replay_tunnels(struct ice_hw *hw);
+int ice_replay_tunnels(struct ice_hw *hw);

/* RX parser PType functions */
bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);

/* XLT1/PType group functions */
-enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk);
+int ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk);
void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg);

/* XLT2/VSI group functions */
-enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk);
-enum ice_status
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
-	     const struct ice_ptype_attributes *attr, u16 attr_cnt,
-	     struct ice_fv_word *es, u16 *masks);
+int ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk);
+int
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+	     unsigned long *ptypes, const struct ice_ptype_attributes *attr,
+	     u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool fd_swap);
void ice_init_all_prof_masks(struct ice_hw *hw);
void ice_shutdown_all_prof_masks(struct ice_hw *hw);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
-enum ice_status
+int
ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig);
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status
+int
+ice_flow_assoc_hw_prof(struct ice_hw *hw, enum ice_block blk,
+		       u16 dest_vsi_handle, u16 fdir_vsi_handle, int id);
+int
ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt);
-enum ice_status
+int
ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt);
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
-enum ice_status
-ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
-void ice_free_seg(struct ice_hw *hw);
+int ice_init_hw_tbls(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
-enum ice_status
+int
ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
	     u64 id);
-enum ice_status
+int
ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
	     u64 id);
-enum ice_status
+int
ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
-struct ice_buf_build *
-ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
-				 void **section);
-struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
-void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
-enum ice_status
+int
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len);
+void ice_fill_blk_tbls(struct ice_hw *hw);
+
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+#define ICE_TNL_PRE "TNL_"
+/* For supporting double VLAN mode, it is necessary to enable or disable certain
+ * boost TCAM entries. Metadata label names that match the following prefixes
+ * will be saved to allow enabling double VLAN mode.
+ */
+#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
+#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
+void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val);
+void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable);
#endif /* _ICE_FLEX_PIPE_H_ */
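A sketch of how such prefixed labels are matched while walking the package's label sections; strncmp-based matching is the obvious implementation, and the surrounding parsing loop is omitted (the helper name is hypothetical):

/* sketch: classify one package label by its name prefix */
static void sketch_handle_label(struct ice_hw *hw, char *name, u16 val)
{
	if (!strncmp(name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
		ice_add_tunnel_hint(hw, name, val);
	else if (!strncmp(name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
		ice_add_dvm_hint(hw, val, true);
	else if (!strncmp(name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
		ice_add_dvm_hint(hw, val, false);
}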


@@ -13,6 +13,7 @@ struct ice_fv_word {
	u8 resvrd;
} __packed;

#define ICE_MAX_NUM_PROFILES 256

#define ICE_MAX_FV_WORDS 48
@@ -20,249 +21,6 @@ struct ice_fv {
	struct ice_fv_word ew[ICE_MAX_FV_WORDS];
};
/* Package and segment headers and tables */
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
__le32 seg_offset[];
};
/* generic segment */
struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE 0x00000010
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
char seg_id[ICE_PKG_NAME_SIZE];
};
/* ice specific segment */
union ice_device_id {
struct {
__le16 device_id;
__le16 vendor_id;
} dev_vend_id;
__le32 id;
};
struct ice_device_id_entry {
union ice_device_id device;
union ice_device_id sub_device;
};
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
struct ice_device_id_entry device_table[];
};
struct ice_nvm_table {
__le32 table_count;
__le32 vers[];
};
struct ice_buf {
#define ICE_PKG_BUF_SIZE 4096
u8 buf[ICE_PKG_BUF_SIZE];
};
struct ice_buf_table {
__le32 buf_count;
struct ice_buf buf_array[];
};
/* global metadata specific segment */
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
__le32 rsvd;
char pkg_name[ICE_PKG_NAME_SIZE];
};
#define ICE_MIN_S_OFF 12
#define ICE_MAX_S_OFF 4095
#define ICE_MIN_S_SZ 1
#define ICE_MAX_S_SZ 4084
/* section information */
struct ice_section_entry {
__le32 type;
__le16 offset;
__le16 size;
};
#define ICE_MIN_S_COUNT 1
#define ICE_MAX_S_COUNT 511
#define ICE_MIN_S_DATA_END 12
#define ICE_MAX_S_DATA_END 4096
#define ICE_METADATA_BUF 0x80000000
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
struct ice_section_entry section_entry[];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
(ent_sz))
/* ice package section IDs */
#define ICE_SID_METADATA 1
#define ICE_SID_XLT0_SW 10
#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
#define ICE_SID_CDID_KEY_BUILDER_SW 17
#define ICE_SID_CDID_REDIR_SW 18
#define ICE_SID_XLT0_ACL 20
#define ICE_SID_XLT_KEY_BUILDER_ACL 21
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
#define ICE_SID_CDID_KEY_BUILDER_ACL 27
#define ICE_SID_CDID_REDIR_ACL 28
#define ICE_SID_XLT0_FD 30
#define ICE_SID_XLT_KEY_BUILDER_FD 31
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
#define ICE_SID_CDID_KEY_BUILDER_FD 37
#define ICE_SID_CDID_REDIR_FD 38
#define ICE_SID_XLT0_RSS 40
#define ICE_SID_XLT_KEY_BUILDER_RSS 41
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_CAM 50
#define ICE_SID_RXPARSER_NOMATCH_CAM 51
#define ICE_SID_RXPARSER_IMEM 52
#define ICE_SID_RXPARSER_XLT0_BUILDER 53
#define ICE_SID_RXPARSER_NODE_PTYPE 54
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_RXPARSER_PROTO_GRP 57
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_RXPARSER_XLT0 59
#define ICE_SID_TXPARSER_CAM 60
#define ICE_SID_TXPARSER_NOMATCH_CAM 61
#define ICE_SID_TXPARSER_IMEM 62
#define ICE_SID_TXPARSER_XLT0_BUILDER 63
#define ICE_SID_TXPARSER_NODE_PTYPE 64
#define ICE_SID_TXPARSER_MARKER_PTYPE 65
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_TXPARSER_PROTO_GRP 67
#define ICE_SID_TXPARSER_METADATA_INIT 68
#define ICE_SID_TXPARSER_XLT0 69
#define ICE_SID_RXPARSER_INIT_REDIR 70
#define ICE_SID_TXPARSER_INIT_REDIR 71
#define ICE_SID_RXPARSER_MARKER_GRP 72
#define ICE_SID_TXPARSER_MARKER_GRP 73
#define ICE_SID_RXPARSER_LAST_PROTO 74
#define ICE_SID_TXPARSER_LAST_PROTO 75
#define ICE_SID_RXPARSER_PG_SPILL 76
#define ICE_SID_TXPARSER_PG_SPILL 77
#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
#define ICE_SID_LBL_RESERVED_12 0x80000012
#define ICE_SID_LBL_RESERVED_13 0x80000013
#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
#define ICE_SID_LBL_PTYPE 0x80000016
#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
#define ICE_SID_LBL_FLAG 0x80000023
#define ICE_SID_LBL_REG 0x80000024
#define ICE_SID_LBL_SW_PTG 0x80000025
#define ICE_SID_LBL_ACL_PTG 0x80000026
#define ICE_SID_LBL_PE_PTG 0x80000027
#define ICE_SID_LBL_RSS_PTG 0x80000028
#define ICE_SID_LBL_FD_PTG 0x80000029
#define ICE_SID_LBL_SW_VSIG 0x8000002A
#define ICE_SID_LBL_ACL_VSIG 0x8000002B
#define ICE_SID_LBL_PE_VSIG 0x8000002C
#define ICE_SID_LBL_RSS_VSIG 0x8000002D
#define ICE_SID_LBL_FD_VSIG 0x8000002E
#define ICE_SID_LBL_PTYPE_META 0x8000002F
#define ICE_SID_LBL_SW_PROFID 0x80000030
#define ICE_SID_LBL_ACL_PROFID 0x80000031
#define ICE_SID_LBL_PE_PROFID 0x80000032
#define ICE_SID_LBL_RSS_PROFID 0x80000033
#define ICE_SID_LBL_FD_PROFID 0x80000034
#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
/* The following define MUST be updated to reflect the last label section ID */
#define ICE_SID_LBL_LAST 0x80000038
enum ice_block {
ICE_BLK_SW = 0,
ICE_BLK_ACL,
ICE_BLK_FD,
ICE_BLK_RSS,
ICE_BLK_PE,
ICE_BLK_COUNT
};
enum ice_sect {
ICE_XLT0 = 0,
ICE_XLT_KB,
ICE_XLT1,
ICE_XLT2,
ICE_PROF_TCAM,
ICE_PROF_REDIR,
ICE_VEC_TBL,
ICE_CDID_KB,
ICE_CDID_REDIR,
ICE_SECT_COUNT
};
/* Packet Type (PTYPE) values */
#define ICE_PTYPE_MAC_PAY 1
#define ICE_MAC_PTP 2
@@ -413,36 +171,84 @@ enum ice_sect {
#define ICE_MAC_IPV6_PFCP_SESSION 354
#define ICE_MAC_IPV4_L2TPV3 360
#define ICE_MAC_IPV6_L2TPV3 361
-#define ICE_MAC_IPV4_L2TPV2_CONTROL 392
-#define ICE_MAC_IPV6_L2TPV2_CONTROL 393
-#define ICE_MAC_IPV4_L2TPV2 394
-#define ICE_MAC_IPV6_L2TPV2 395
-#define ICE_MAC_IPV4_PPPOL2TPV2 396
-#define ICE_MAC_IPV6_PPPOL2TPV2 397
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_FRAG 398
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_PAY 399
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_UDP_PAY 400
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_TCP 401
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_SCTP 402
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_ICMP 403
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_FRAG 404
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_PAY 405
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_UDP_PAY 406
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_TCP 407
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_SCTP 408
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_ICMPV6 409
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_FRAG 410
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_PAY 411
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_UDP_PAY 412
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_TCP 413
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_SCTP 414
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_ICMP 415
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_FRAG 416
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_PAY 417
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_UDP_PAY 418
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_TCP 419
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_SCTP 420
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_ICMPV6 421
+#define ICE_MAC_IPV4_L2TPV2_CONTROL 396
+#define ICE_MAC_IPV6_L2TPV2_CONTROL 397
+#define ICE_MAC_IPV4_L2TPV2 398
+#define ICE_MAC_IPV6_L2TPV2 399
+#define ICE_MAC_IPV4_PPPOL2TPV2 400
+#define ICE_MAC_IPV6_PPPOL2TPV2 401
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_FRAG 402
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_PAY 403
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_UDP_PAY 404
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_TCP 405
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_SCTP 406
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_ICMP 407
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_FRAG 408
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_PAY 409
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_UDP_PAY 410
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_TCP 411
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_SCTP 412
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_ICMPV6 413
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_FRAG 414
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_PAY 415
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_UDP_PAY 416
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_TCP 417
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_SCTP 418
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_ICMP 419
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_FRAG 420
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_PAY 421
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_UDP_PAY 422
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_TCP 423
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_SCTP 424
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_ICMPV6 425
#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG 450
#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY 451
#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY 452
#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP 453
#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_SCTP 454
#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP 455
#define MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG 456
#define MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY 457
#define MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY 458
#define MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP 459
#define MAC_IPV4_TUN_IPV4_GTPU_IPV6_SCTP 460
#define MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6 461
#define MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG 462
#define MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY 463
#define MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY 464
#define MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP 465
#define MAC_IPV4_TUN_IPV6_GTPU_IPV4_SCTP 466
#define MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP 467
#define MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG 468
#define MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY 469
#define MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY 470
#define MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP 471
#define MAC_IPV4_TUN_IPV6_GTPU_IPV6_SCTP 472
#define MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6 473
#define MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG 474
#define MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY 475
#define MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY 476
#define MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP 477
#define MAC_IPV6_TUN_IPV4_GTPU_IPV4_SCTP 478
#define MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP 479
#define MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG 480
#define MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY 481
#define MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY 482
#define MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP 483
#define MAC_IPV6_TUN_IPV4_GTPU_IPV6_SCTP 484
#define MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6 485
#define MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG 486
#define MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY 487
#define MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY 488
#define MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP 489
#define MAC_IPV6_TUN_IPV6_GTPU_IPV4_SCTP 490
#define MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP 491
#define MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG 492
#define MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY 493
#define MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY 494
#define MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP 495
#define MAC_IPV6_TUN_IPV6_GTPU_IPV6_SCTP 496
#define MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6 497
/* Attributes that can modify PTYPE definitions.
 *
@@ -569,10 +375,18 @@ struct ice_sw_fv_list_entry {
 * fields of the packet are now little endian.
 */
struct ice_boost_key_value {
#define ICE_BOOST_REMAINING_HV_KEY 15
	u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
-	__le16 hv_dst_port_key;
-	__le16 hv_src_port_key;
+	union {
+		struct {
+			__le16 hv_dst_port_key;
+			__le16 hv_src_port_key;
+		} /* udp_tunnel */;
+		struct {
+			__le16 hv_vlan_id_key;
+			__le16 hv_etype_key;
+		} vlan;
+	};
	u8 tcam_search_key;
} __packed;
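The anonymous union lets the same two TCAM key words carry either UDP tunnel ports or VLAN matching data, depending on the boost entry type. A hedged initialization sketch (the port value is just an example; ETH_P_8021Q comes from linux/if_ether.h):

struct ice_boost_key_value key = {};

/* UDP tunnel entry: match on destination port (anonymous struct) */
key.hv_dst_port_key = cpu_to_le16(4789);	/* IANA VXLAN port */

/* DVM VLAN entry: match on the outer EtherType instead */
key.vlan.hv_etype_key = cpu_to_le16(ETH_P_8021Q);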
@@ -605,24 +419,6 @@ struct ice_boost_tcam_section {
			       sizeof(struct ice_boost_tcam_entry), \
			       sizeof(struct ice_boost_tcam_entry))
/* package Marker PType TCAM entry */
struct ice_marker_ptype_tcam_entry {
#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
__le16 addr;
__le16 ptype;
u8 keys[20];
};
struct ice_marker_ptype_tcam_section {
__le16 count;
__le16 reserved;
struct ice_marker_ptype_tcam_entry tcam[];
};
#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \
sizeof(struct ice_marker_ptype_tcam_entry), \
sizeof(struct ice_marker_ptype_tcam_entry))
 struct ice_xlt1_section {
 	__le16 count;
 	__le16 offset;
@@ -641,34 +437,16 @@ struct ice_prof_redir_section
 	u8 redir_value[];
 };
-/* package buffer building */
-struct ice_buf_build {
-	struct ice_buf buf;
-	u16 reserved_section_table_entries;
-};
-
-struct ice_pkg_enum {
-	struct ice_buf_table *buf_table;
-	u32 buf_idx;
-	u32 type;
-	struct ice_buf_hdr *buf;
-	u32 sect_idx;
-	void *sect;
-	u32 sect_type;
-	u32 entry_idx;
-	void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
-};
 /* Tunnel enabling */

 enum ice_tunnel_type {
 	TNL_VXLAN = 0,
 	TNL_GENEVE,
-	TNL_ECPRI,
+	TNL_GRETAP,
 	TNL_GTP,
+	TNL_GTPC,
+	TNL_GTPU,
+	TNL_ECPRI,
 	TNL_LAST = 0xFF,
 	TNL_ALL = 0xFF,
 };
@@ -748,8 +526,8 @@ struct ice_ptg_ptype
 	u8 ptg;
 };

-#define ICE_MAX_TCAM_PER_PROFILE	32
-#define ICE_MAX_PTG_PER_PROFILE	32
+#define ICE_MAX_TCAM_PER_PROFILE	64
+#define ICE_MAX_PTG_PER_PROFILE	64

 struct ice_prof_map {
 	struct list_head list;
@@ -942,11 +720,14 @@ struct ice_chs_chg
 #define ICE_FLOW_PTYPE_MAX	ICE_XLT1_CNT

 enum ice_prof_type {
+	ICE_PROF_INVALID = 0x0,
 	ICE_PROF_NON_TUN = 0x1,
 	ICE_PROF_TUN_UDP = 0x2,
 	ICE_PROF_TUN_GRE = 0x4,
-	ICE_PROF_TUN_PPPOE = 0x8,
-	ICE_PROF_TUN_ALL = 0xE,
+	ICE_PROF_TUN_GTPU = 0x8,
+	ICE_PROF_TUN_GTPC = 0x10,
+	ICE_PROF_TUN_PPPOE = 0x20,
+	ICE_PROF_TUN_ALL = 0x3E,
 	ICE_PROF_ALL = 0xFF,
 };
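Since the profile types are one-hot bits, ICE_PROF_TUN_ALL is simply the OR of the five tunnel bits (0x2 | 0x4 | 0x8 | 0x10 | 0x20 = 0x3E). A standalone sketch with local stand-in names verifies the arithmetic and shows the intended bitwise tests:

```c
#include <stdio.h>

/* Local stand-ins mirroring the values in enum ice_prof_type above. */
enum prof_type_demo {
	PROF_NON_TUN   = 0x1,
	PROF_TUN_UDP   = 0x2,
	PROF_TUN_GRE   = 0x4,
	PROF_TUN_GTPU  = 0x8,
	PROF_TUN_GTPC  = 0x10,
	PROF_TUN_PPPOE = 0x20,
	PROF_TUN_ALL   = 0x3E,
};

int main(void)
{
	unsigned int all = PROF_TUN_UDP | PROF_TUN_GRE | PROF_TUN_GTPU |
			   PROF_TUN_GTPC | PROF_TUN_PPPOE;

	printf("OR of tunnel bits = 0x%x (TUN_ALL = 0x%x)\n", all, PROF_TUN_ALL);
	printf("GTPU counts as a tunnel type: %s\n",
	       (PROF_TUN_GTPU & PROF_TUN_ALL) ? "yes" : "no");
	return 0;
}
```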
drivers/thirdparty/ice/ice_flg_rd.c (new file, 75 lines)
@@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice_common.h"
#include "ice_parser_util.h"
#define ICE_FLG_RD_TABLE_SIZE 64
/**
* ice_flg_rd_dump - dump a flag redirect item info
* @hw: pointer to the hardware structure
* @item: flag redirect item to dump
*/
void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item)
{
dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
dev_info(ice_hw_to_dev(hw), "expose = %d\n", item->expose);
dev_info(ice_hw_to_dev(hw), "intr_flg_id = %d\n", item->intr_flg_id);
}
/**
 * _flg_rd_parse_item - parse a flag redirect table entry
 * @hw: pointer to the hardware structure
 * @idx: index of the entry to parse
 * @item: item to store the parsed result
 * @data: raw data to parse
 * @size: size of the raw data
 *
 * The function parses an 8-bit Flag Redirect Table entry with the following
 * format:
 *  BIT 0: Expose (rdi->expose)
 *  BIT 1-6: Internal Flag ID (rdi->intr_flg_id)
 *  BIT 7: reserved
 */
static void _flg_rd_parse_item(struct ice_hw *hw, u16 idx, void *item,
void *data, int size)
{
struct ice_flg_rd_item *rdi = (struct ice_flg_rd_item *)item;
u8 d8 = *(u8 *)data;
rdi->idx = idx;
rdi->expose = (d8 & 0x1) != 0;
rdi->intr_flg_id = (u8)((d8 >> 1) & 0x3f);
if (hw->debug_mask & ICE_DBG_PARSER)
ice_flg_rd_dump(hw, rdi);
}
/**
* ice_flg_rd_table_get - create a flag redirect table
* @hw: pointer to the hardware structure
*/
struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw)
{
return (struct ice_flg_rd_item *)
ice_parser_create_table(hw, ICE_SID_RXPARSER_FLAG_REDIR,
sizeof(struct ice_flg_rd_item),
ICE_FLG_RD_TABLE_SIZE,
ice_parser_sect_item_get,
_flg_rd_parse_item, false);
}
/**
* ice_flg_redirect - redirect a parser flag to packet flag
* @table: flag redirect table
* @psr_flg: parser flag to redirect
*/
u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg)
{
u64 flg = 0;
int i;
for (i = 0; i < 64; i++) {
struct ice_flg_rd_item *item = &table[i];
if (!item->expose)
continue;
		if (psr_flg & (1ULL << item->intr_flg_id))
			flg |= (1ULL << i);
}
return flg;
}
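A standalone sketch of the same redirect walk, useful for seeing how a parser flag becomes a packet flag: each of the 64 table slots may expose one internal parser flag bit at its own index. The table contents below are invented, and the type is a local stand-in for struct ice_flg_rd_item.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct flg_rd_item_demo {
	uint16_t idx;
	bool expose;
	uint8_t intr_flg_id;
};

static uint64_t flg_redirect(const struct flg_rd_item_demo *table,
			     uint64_t psr_flg)
{
	uint64_t flg = 0;

	for (int i = 0; i < 64; i++) {
		if (!table[i].expose)
			continue;
		if (psr_flg & (1ULL << table[i].intr_flg_id))
			flg |= 1ULL << i;
	}
	return flg;
}

int main(void)
{
	struct flg_rd_item_demo table[64] = { 0 };

	/* Hypothetical entry: expose internal flag 5 as packet flag 0. */
	table[0] = (struct flg_rd_item_demo){ .idx = 0, .expose = true,
					      .intr_flg_id = 5 };
	printf("redirected flags: 0x%llx\n",
	       (unsigned long long)flg_redirect(table, 1ULL << 5));
	return 0;
}
```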
drivers/thirdparty/ice/ice_flg_rd.h (new file, 16 lines)
@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_FLG_RD_H_
#define _ICE_FLG_RD_H_
struct ice_flg_rd_item {
u16 idx;
bool expose;
u8 intr_flg_id;
};
void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item);
struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw);
u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg);
#endif /* _ICE_FLG_RD_H_ */
File diff suppressed because it is too large

@@ -148,6 +148,23 @@
 #define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
 	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)

+#define ICE_FLOW_HASH_L2TPV2_SESS_ID \
+	(BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID))
+#define ICE_FLOW_HASH_L2TPV2_SESS_ID_ETH \
+	(ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_SESS_ID)
+#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID \
+	(BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID))
+#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID_ETH \
+	(ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID)
+
+#define ICE_FLOW_FIELD_IPV4_SRC_OFFSET	12
+#define ICE_FLOW_FIELD_IPV4_DST_OFFSET	16
+#define ICE_FLOW_FIELD_IPV6_SRC_OFFSET	8
+#define ICE_FLOW_FIELD_IPV6_DST_OFFSET	24
+#define ICE_FLOW_FIELD_SRC_PORT_OFFSET	0
+#define ICE_FLOW_FIELD_DST_PORT_OFFSET	2
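These offsets are byte positions of each field within its own protocol header (the IPv4 source address starts 12 bytes in, the destination at byte 16, and so on). A standalone sketch, with made-up packet bytes and local macro names, of pulling the addresses out of a raw IPv4 header:

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IPV4_SRC_OFFSET 12 /* mirrors ICE_FLOW_FIELD_IPV4_SRC_OFFSET */
#define IPV4_DST_OFFSET 16 /* mirrors ICE_FLOW_FIELD_IPV4_DST_OFFSET */

int main(void)
{
	/* Minimal 20-byte IPv4 header: 192.0.2.1 -> 198.51.100.2 */
	uint8_t hdr[20] = {
		0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00,
		0x40, 0x11, 0x00, 0x00,
		192, 0, 2, 1,    /* source address at offset 12 */
		198, 51, 100, 2, /* destination address at offset 16 */
	};
	uint32_t src, dst;

	memcpy(&src, hdr + IPV4_SRC_OFFSET, sizeof(src));
	memcpy(&dst, hdr + IPV4_DST_OFFSET, sizeof(dst));
	printf("src=0x%08x dst=0x%08x\n",
	       (unsigned int)ntohl(src), (unsigned int)ntohl(dst));
	return 0;
}
```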
 /* Protocol header fields within a packet segment. A segment consists of one or
  * more protocol headers that make up a logical group of protocol headers. Each
  * logical group of protocol headers encapsulates or is encapsulated using/by
@@ -227,6 +244,7 @@ enum ice_flow_field
 	ICE_FLOW_FIELD_IDX_IPV4_DA,
 	ICE_FLOW_FIELD_IDX_IPV6_SA,
 	ICE_FLOW_FIELD_IDX_IPV6_DA,
+	ICE_FLOW_FIELD_IDX_IPV4_CHKSUM,
 	ICE_FLOW_FIELD_IDX_IPV4_ID,
 	ICE_FLOW_FIELD_IDX_IPV6_ID,
 	ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA,
@@ -243,6 +261,9 @@ enum ice_flow_field
 	ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
 	ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
 	ICE_FLOW_FIELD_IDX_TCP_FLAGS,
+	ICE_FLOW_FIELD_IDX_TCP_CHKSUM,
+	ICE_FLOW_FIELD_IDX_UDP_CHKSUM,
+	ICE_FLOW_FIELD_IDX_SCTP_CHKSUM,
 	/* ARP */
 	ICE_FLOW_FIELD_IDX_ARP_SIP,
 	ICE_FLOW_FIELD_IDX_ARP_DIP,
@@ -263,8 +284,10 @@ enum ice_flow_field
 	ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
 	/* GTPU_UP */
 	ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
+	ICE_FLOW_FIELD_IDX_GTPU_UP_QFI,
 	/* GTPU_DWN */
 	ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
+	ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI,
 	/* PPPOE */
 	ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
 	/* PFCP */
@@ -283,11 +306,14 @@ enum ice_flow_field
 	ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID,
 	/* UDP_ECPRI_TP0 */
 	ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID,
+	/* L2TPV2 SESSION ID */
+	ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID,
+	/* L2TPV2_LEN SESSION ID */
+	ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID,
 	/* The total number of enums must not exceed 64 */
 	ICE_FLOW_FIELD_IDX_MAX
 };

 /* Flow headers and fields for AVF support */
 enum ice_flow_avf_hdr_field {
 	/* Values 0 - 28 are reserved for future use */
@@ -348,6 +374,10 @@ enum ice_rss_cfg_hdr_type
 	/* take inner headers as inputset for packet with outer ipv6. */
 	ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
 	/* take outer headers first then inner headers as inputset */
+	/* take inner as inputset for GTPoGRE with outer ipv4 + gre. */
+	ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE,
+	/* take inner as inputset for GTPoGRE with outer ipv6 + gre. */
+	ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE,
 	ICE_RSS_ANY_HEADERS
 };
@@ -389,7 +419,6 @@ struct ice_flow_seg_xtrct
 	u16 mask;	/* Mask for field */
 };
-
 enum ice_flow_fld_match_type {
 	ICE_FLOW_FLD_TYPE_REG,		/* Value, mask */
 	ICE_FLOW_FLD_TYPE_RANGE,	/* Value, mask, last (upper bound) */
@@ -452,8 +481,8 @@ struct ice_flow_entry
 	u8 acts_cnt;
 };

-#define ICE_FLOW_ENTRY_HNDL(e)	((u64)e)
-#define ICE_FLOW_ENTRY_PTR(h)	((struct ice_flow_entry *)(h))
+#define ICE_FLOW_ENTRY_HNDL(e)	((u64)(uintptr_t)(e))
+#define ICE_FLOW_ENTRY_PTR(h)	((struct ice_flow_entry *)(uintptr_t)(h))
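The extra (uintptr_t) hop matters on builds where pointers are narrower than u64: casting a pointer straight to a 64-bit integer (and back) draws pointer/integer size warnings. A standalone round-trip sketch with local macro names:

```c
#include <stdint.h>
#include <stdio.h>

struct flow_entry_demo { int dummy; };

/* Local equivalents of the updated ICE_FLOW_ENTRY_* macros. */
#define ENTRY_HNDL(e)	((uint64_t)(uintptr_t)(e))
#define ENTRY_PTR(h)	((struct flow_entry_demo *)(uintptr_t)(h))

int main(void)
{
	struct flow_entry_demo entry = { 42 };
	uint64_t handle = ENTRY_HNDL(&entry);

	/* Widen through uintptr_t, narrow back through uintptr_t: no
	 * warnings, and the pointer survives the round trip intact.
	 */
	printf("round-trip ok: %d\n", ENTRY_PTR(handle)->dummy);
	return 0;
}
```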
 struct ice_flow_prof {
 	struct list_head l_entry;
@@ -484,11 +513,18 @@ struct ice_flow_prof
 	struct ice_flow_action *acts;
 };

+struct ice_rss_raw_cfg {
+	struct ice_parser_profile prof;
+	bool raw_ena;
+	bool symm;
+};
+
 struct ice_rss_cfg {
 	struct list_head l_entry;
 	/* bitmap of VSIs added to the RSS entry */
 	DECLARE_BITMAP(vsis, ICE_MAX_VSI);
 	struct ice_rss_hash_cfg hash;
+	struct ice_rss_raw_cfg raw;
 };

 enum ice_flow_action_type {
@@ -525,26 +561,30 @@ struct ice_flow_action
 u64
 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
 		   struct ice_flow_seg_info *segs, u8 segs_cnt);
-enum ice_status
+int
 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
 		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
 		  struct ice_flow_action *acts, u8 acts_cnt,
 		  struct ice_flow_prof **prof);
-enum ice_status
+int
 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
-enum ice_status
+int
+ice_flow_set_hw_prof(struct ice_hw *hw, u16 dest_vsi_handle,
+		     u16 fdir_vsi_handle, struct ice_parser_profile *prof,
+		     enum ice_block blk);
+int
 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
 			u16 vsig);
-enum ice_status
+int
 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
 		     u8 *hw_prof);

 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id);
-enum ice_status
+int
 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
 		   u64 entry_id, u16 vsi, enum ice_flow_priority prio,
 		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
 		   u64 *entry_h);
-enum ice_status
+int
 ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
 void
 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
@@ -555,18 +595,20 @@ ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
 void
 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
 		     u16 val_loc, u16 mask_loc);
-enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk,
-				      u16 vsi_handle, u64 prof_id);
+int ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk,
+			  u16 vsi_handle, u64 prof_id);
 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int
 ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int
 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
 		const struct ice_rss_hash_cfg *cfg);
-enum ice_status
+int
 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
 		const struct ice_rss_hash_cfg *cfg);
+void ice_rss_update_raw_symm(struct ice_hw *hw,
+			     struct ice_rss_raw_cfg *cfg, u64 id);
 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
 #endif /* _ICE_FLOW_H_ */
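The net effect of the enum ice_status to int conversion is that callers can treat flow API failures as ordinary negative errno values. A standalone sketch, where add_prof_demo is a hypothetical stand-in for a converted function such as ice_flow_add_prof():

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int add_prof_demo(int simulate_failure)
{
	if (simulate_failure)
		return -ENOSPC;	/* plain errno, no ice_stat_str() needed */
	return 0;
}

int main(void)
{
	int err = add_prof_demo(1);

	/* Standard errno reporting replaces the driver-private enum. */
	if (err)
		printf("add prof failed: %d (%s)\n", err, strerror(-err));
	return 0;
}
```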
@@ -54,10 +54,18 @@ ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info,
  *
  * Set VSI with all associated VLANs to given promiscuous mode(s)
  */
-enum ice_status
-ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask)
+int
+ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask)
 {
-	return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+	struct ice_pf *pf = (struct ice_pf *)hw->back;
+	int result;
+
+	result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+	if (result && result != -EEXIST)
+		dev_err(ice_pf_to_dev(pf),
+			"Error setting promisc mode on VSI %i (rc=%d)\n",
+			vsi->vsi_num, result);
+
+	return result;
 }

 /**
@@ -68,10 +76,18 @@ ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc
  *
  * Clear VSI with all associated VLANs to given promiscuous mode(s)
  */
-enum ice_status
-ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask)
+int
+ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask)
 {
-	return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+	struct ice_pf *pf = (struct ice_pf *)hw->back;
+	int result;
+
+	result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+	if (result && result != -EEXIST)
+		dev_err(ice_pf_to_dev(pf),
+			"Error clearing promisc mode on VSI %i (rc=%d)\n",
+			vsi->vsi_num, result);
+
+	return result;
 }

 /**
@@ -82,11 +98,19 @@ ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promi
  * @vid: VLAN ID to clear VLAN promiscuous
  * @lport: logical port number to clear mode
  */
-enum ice_status
+int
 ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
 			   u16 vid, u8 lport)
 {
-	return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+	struct ice_pf *pf = (struct ice_pf *)hw->back;
+	int result;
+
+	result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+	if (result && result != -EEXIST)
+		dev_err(ice_pf_to_dev(pf),
+			"Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
+			ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+	return result;
 }

 /**
@@ -97,11 +121,19 @@ ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
  * @vid: VLAN ID to set VLAN promiscuous
  * @lport: logical port number to set promiscuous mode
  */
-enum ice_status
+int
 ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
 			 u16 vid, u8 lport)
 {
-	return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+	struct ice_pf *pf = (struct ice_pf *)hw->back;
+	int result;
+
+	result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+	if (result && result != -EEXIST)
+		dev_err(ice_pf_to_dev(pf),
+			"Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
+			ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+	return result;
 }
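All four wrappers above share one shape: call the low-level helper, log an error unless the result is -EEXIST (the mode was already set), and return the raw result either way. A standalone sketch of that pattern, with hypothetical stand-in functions:

```c
#include <errno.h>
#include <stdio.h>

static int set_promisc_demo(void)
{
	return -EEXIST;	/* pretend the filter already existed */
}

static int fltr_set_promisc_demo(int vsi_num)
{
	int result = set_promisc_demo();

	/* -EEXIST is benign here, so it is returned but not logged. */
	if (result && result != -EEXIST)
		fprintf(stderr, "Error setting promisc mode on VSI %i (rc=%d)\n",
			vsi_num, result);
	return result;
}

int main(void)
{
	printf("rc=%d (nothing logged for -EEXIST)\n", fltr_set_promisc_demo(3));
	return 0;
}
```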
 /**
@@ -109,8 +141,7 @@ ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
  * @vsi: pointer to VSI struct
  * @list: list of filters
  */
-enum ice_status
-ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
+int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
 {
 	return ice_add_mac(&vsi->back->hw, list);
 }
@@ -120,8 +151,7 @@ ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
  * @vsi: pointer to VSI struct
  * @list: list of filters
  */
-enum ice_status
-ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
+int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
 {
 	return ice_remove_mac(&vsi->back->hw, list);
 }
@@ -131,8 +161,7 @@ ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
  * @vsi: pointer to VSI struct
  * @list: list of filters
  */
-static enum ice_status
-ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
 {
 	return ice_add_vlan(&vsi->back->hw, list);
 }
@@ -142,7 +171,7 @@ ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
  * @vsi: pointer to VSI struct
  * @list: list of filters
  */
-static enum ice_status
+static int
 ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
 {
 	return ice_remove_vlan(&vsi->back->hw, list);
@@ -153,7 +182,7 @@ ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
  * @vsi: pointer to VSI struct
  * @list: list of filters
  */
-static enum ice_status
+static int
 ice_fltr_add_mac_vlan_list(struct ice_vsi *vsi, struct list_head *list)
 {
 	return ice_add_mac_vlan(&vsi->back->hw, list);
@@ -164,7 +193,7 @@ ice_fltr_add_mac_vlan_list(struct ice_vsi *vsi, struct list_head *list)
  * @vsi: pointer to VSI struct
  * @list: list of filters
  */
-static enum ice_status
+static int
 ice_fltr_remove_mac_vlan_list(struct ice_vsi *vsi, struct list_head *list)
 {
 	return ice_remove_mac_vlan(&vsi->back->hw, list);
@@ -175,8 +204,7 @@ ice_fltr_remove_mac_vlan_list(struct ice_vsi *vsi, struct list_head *list)
  * @vsi: pointer to VSI struct
  * @list: list of filters
  */
-static enum ice_status
-ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
 {
 	return ice_add_eth_mac(&vsi->back->hw, list);
 }
@@ -186,8 +214,7 @@ ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
  * @vsi: pointer to VSI struct
  * @list: list of filters
  */
-static enum ice_status
-ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
 {
 	return ice_remove_eth_mac(&vsi->back->hw, list);
 }
@@ -321,18 +348,17 @@ ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list,
  * @action: action to be performed on filter match
  * @mac_action: pointer to add or remove MAC function
  */
-static enum ice_status
+static int
 ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
 		     enum ice_sw_fwd_act_type action,
-		     enum ice_status (*mac_action)(struct ice_vsi *,
-						   struct list_head *))
+		     int (*mac_action)(struct ice_vsi *, struct list_head *))
 {
-	enum ice_status result;
 	LIST_HEAD(tmp_list);
+	int result;

 	if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) {
 		ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
-		return ICE_ERR_NO_MEMORY;
+		return -ENOMEM;
 	}

 	result = mac_action(vsi, &tmp_list);
@@ -347,21 +373,21 @@ ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
  * @action: action to be performed on filter match
  * @mac_action: pointer to add or remove MAC function
  */
-static enum ice_status
+static int
 ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
 				   enum ice_sw_fwd_act_type action,
-				   enum ice_status(*mac_action)
+				   int(*mac_action)
 				   (struct ice_vsi *, struct list_head *))
 {
 	u8 broadcast[ETH_ALEN];
-	enum ice_status result;
 	LIST_HEAD(tmp_list);
+	int result;

 	eth_broadcast_addr(broadcast);
 	if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) ||
 	    ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) {
 		ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
-		return ICE_ERR_NO_MEMORY;
+		return -ENOMEM;
 	}

 	result = mac_action(vsi, &tmp_list);
@@ -375,16 +401,15 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
  * @vlan: VLAN filter details
  * @vlan_action: pointer to add or remove VLAN function
  */
-static enum ice_status
+static int
 ice_fltr_prepare_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan,
-		      enum ice_status (*vlan_action)(struct ice_vsi *,
-						     struct list_head *))
+		      int (*vlan_action)(struct ice_vsi *, struct list_head *))
 {
-	enum ice_status result;
 	LIST_HEAD(tmp_list);
+	int result;

 	if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan))
-		return ICE_ERR_NO_MEMORY;
+		return -ENOMEM;

 	result = vlan_action(vsi, &tmp_list);
 	ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@@ -399,18 +424,18 @@ ice_fltr_prepare_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan,
  * @action: action to be performed on filter match
  * @mac_vlan_action: pointer to add or remove MAC VLAN function
  */
-static enum ice_status
+static int
 ice_fltr_prepare_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id,
 			  enum ice_sw_fwd_act_type action,
-			  enum ice_status (mac_vlan_action)(struct ice_vsi *,
-							    struct list_head *))
+			  int (mac_vlan_action)(struct ice_vsi *,
						struct list_head *))
 {
-	enum ice_status result;
 	LIST_HEAD(tmp_list);
+	int result;

 	if (ice_fltr_add_mac_vlan_to_list(vsi, &tmp_list, mac, vlan_id,
 					  action))
-		return ICE_ERR_NO_MEMORY;
+		return -ENOMEM;

 	result = mac_vlan_action(vsi, &tmp_list);
 	ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@@ -425,17 +450,16 @@ ice_fltr_prepare_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id,
  * @action: action to be performed on filter match
  * @eth_action: pointer to add or remove ethertype function
  */
-static enum ice_status
+static int
 ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
 		     enum ice_sw_fwd_act_type action,
-		     enum ice_status (*eth_action)(struct ice_vsi *,
-						   struct list_head *))
+		     int (*eth_action)(struct ice_vsi *, struct list_head *))
 {
-	enum ice_status result;
 	LIST_HEAD(tmp_list);
+	int result;

 	if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action))
-		return ICE_ERR_NO_MEMORY;
+		return -ENOMEM;

 	result = eth_action(vsi, &tmp_list);
 	ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@@ -448,8 +472,9 @@ ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
  * @mac: MAC to add
  * @action: action to be performed on filter match
  */
-enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
-				 enum ice_sw_fwd_act_type action)
+int
+ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
+		 enum ice_sw_fwd_act_type action)
 {
 	return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list);
 }
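The prepare helpers that back these entry points follow a single pattern: build a throwaway one-entry list, hand it to the add or remove callback, free the list, and return the callback's result. A standalone sketch of that shape (a plain heap buffer instead of the kernel list_head, names invented):

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fltr_demo { unsigned char mac[6]; };

static int add_list_demo(struct fltr_demo *list, int n)
{
	printf("adding %d filter(s), first byte %02x\n", n, list[0].mac[0]);
	return 0;
}

static int prepare_mac_demo(const unsigned char *mac,
			    int (*action)(struct fltr_demo *, int))
{
	struct fltr_demo *tmp = calloc(1, sizeof(*tmp));
	int result;

	if (!tmp)
		return -ENOMEM;	/* mirrors the ICE_ERR_NO_MEMORY -> -ENOMEM change */
	for (int i = 0; i < 6; i++)
		tmp->mac[i] = mac[i];
	result = action(tmp, 1);	/* add or remove, caller's choice */
	free(tmp);			/* ice_fltr_free_list() equivalent */
	return result;
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	return prepare_mac_demo(mac, add_list_demo);
}
```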
@@ -460,7 +485,7 @@ ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
  * @mac: MAC to add
  * @action: action to be performed on filter match
  */
-enum ice_status
+int
 ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
 			       enum ice_sw_fwd_act_type action)
 {
@@ -474,8 +499,9 @@ ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
  * @mac: filter MAC to remove
  * @action: action to remove
  */
-enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
-				    enum ice_sw_fwd_act_type action)
+int
+ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
+		    enum ice_sw_fwd_act_type action)
 {
 	return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list);
 }
@@ -485,7 +511,7 @@ ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
  * @vsi: pointer to VSI struct
  * @vlan: VLAN filter details
  */
-enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
 {
 	return ice_fltr_prepare_vlan(vsi, vlan, ice_fltr_add_vlan_list);
 }
@@ -495,7 +521,7 @@ ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
  * @vsi: pointer to VSI struct
  * @vlan: VLAN filter details
  */
-enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
 {
 	return ice_fltr_prepare_vlan(vsi, vlan, ice_fltr_remove_vlan_list);
 }
@@ -507,7 +533,7 @@ ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
  * @vlan_id: VLAN ID to add
  * @action: action to be performed on filter match
  */
-enum ice_status
+int
 ice_fltr_add_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id,
 		      enum ice_sw_fwd_act_type action)
 {
@@ -522,7 +548,7 @@ ice_fltr_add_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id,
  * @vlan_id: filter MAC VLAN to remove
  * @action: action to remove
  */
-enum ice_status
+int
 ice_fltr_remove_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id,
 			 enum ice_sw_fwd_act_type action)
 {
@@ -537,8 +563,9 @@ ice_fltr_remove_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id,
  * @flag: direction of packet to be filtered, Tx or Rx
  * @action: action to be performed on filter match
  */
-enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
-				 enum ice_sw_fwd_act_type action)
+int
+ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+		 enum ice_sw_fwd_act_type action)
 {
 	return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
 				    ice_fltr_add_eth_list);
@@ -551,217 +578,11 @@ ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
  * @flag: direction of filter
  * @action: action to remove
  */
-enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
-				    u16 flag, enum ice_sw_fwd_act_type action)
+int
+ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+		    enum ice_sw_fwd_act_type action)
 {
 	return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
 				    ice_fltr_remove_eth_list);
 }
/**
* ice_fltr_update_rule_flags - update lan_en/lb_en flags
* @hw: pointer to hw
* @rule_id: id of rule being updated
* @recipe_id: recipe id of rule
* @act: current action field
* @type: Rx or Tx
* @src: source VSI
* @new_flags: combinations of lb_en and lan_en
*/
static enum ice_status
ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id,
u32 act, u16 type, u16 src, u32 new_flags)
{
struct ice_aqc_sw_rules_elem *s_rule;
enum ice_status err;
u32 flags_mask;
s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
act &= ~flags_mask;
act |= (flags_mask & new_flags);
s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id);
s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
if (type & ICE_FLTR_RX) {
s_rule->pdata.lkup_tx_rx.src =
cpu_to_le16(hw->port_info->lport);
s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
} else {
s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src);
s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
}
err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
ice_aqc_opc_update_sw_rules, NULL);
kfree(s_rule);
return err;
}
/**
* ice_fltr_build_action - build action for rule
* @vsi_id: id of VSI which is use to build action
*/
static u32
ice_fltr_build_action(u16 vsi_id)
{
return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) |
ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
}
/**
* ice_fltr_find_adv_entry - find advanced rule
* @rules: list of rules
* @rule_id: id of wanted rule
*/
static struct ice_adv_fltr_mgmt_list_entry *
ice_fltr_find_adv_entry(struct list_head *rules, u16 rule_id)
{
struct ice_adv_fltr_mgmt_list_entry *entry;
list_for_each_entry(entry, rules, list_entry) {
if (entry->rule_info.fltr_rule_id == rule_id)
return entry;
}
return NULL;
}
/**
* ice_fltr_update_adv_rule_flags - update flags on advanced rule
* @vsi: pointer to VSI
* @recipe_id: id of recipe
* @entry: advanced rule entry
* @new_flags: flags to update
*/
static enum ice_status
ice_fltr_update_adv_rule_flags(struct ice_vsi *vsi, u16 recipe_id,
struct ice_adv_fltr_mgmt_list_entry *entry,
u32 new_flags)
{
struct ice_adv_rule_info *info = &entry->rule_info;
struct ice_sw_act_ctrl *act = &info->sw_act;
u32 action;
if (act->fltr_act != ICE_FWD_TO_VSI)
return ICE_ERR_NOT_SUPPORTED;
action = ice_fltr_build_action(act->fwd_id.hw_vsi_id);
return ice_fltr_update_rule_flags(&vsi->back->hw, info->fltr_rule_id,
recipe_id, action, info->sw_act.flag,
act->src, new_flags);
}
/**
* ice_fltr_find_regular_entry - find regular rule
* @rules: list of rules
* @rule_id: id of wanted rule
*/
static struct ice_fltr_mgmt_list_entry *
ice_fltr_find_regular_entry(struct list_head *rules, u16 rule_id)
{
struct ice_fltr_mgmt_list_entry *entry;
list_for_each_entry(entry, rules, list_entry) {
if (entry->fltr_info.fltr_rule_id == rule_id)
return entry;
}
return NULL;
}
/**
* ice_fltr_update_regular_rule - update flags on regular rule
* @vsi: pointer to VSI
* @recipe_id: id of recipe
* @entry: regular rule entry
* @new_flags: flags to update
*/
static enum ice_status
ice_fltr_update_regular_rule(struct ice_vsi *vsi, u16 recipe_id,
struct ice_fltr_mgmt_list_entry *entry,
u32 new_flags)
{
struct ice_fltr_info *info = &entry->fltr_info;
u32 action;
if (info->fltr_act != ICE_FWD_TO_VSI)
return ICE_ERR_NOT_SUPPORTED;
action = ice_fltr_build_action(info->fwd_id.hw_vsi_id);
return ice_fltr_update_rule_flags(&vsi->back->hw, info->fltr_rule_id,
recipe_id, action, info->flag,
info->src, new_flags);
}
/**
* ice_fltr_update_flags - update flags on rule
* @vsi: pointer to VSI
* @rule_id: id of rule
* @recipe_id: id of recipe
* @new_flags: flags to update
*
* Function updates flags on regular and advance rule.
*
* Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and
* ICE_SINGLE_ACT_LAN_ENABLE.
*/
enum ice_status
ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id,
u32 new_flags)
{
struct ice_adv_fltr_mgmt_list_entry *adv_entry;
struct ice_fltr_mgmt_list_entry *regular_entry;
struct ice_hw *hw = &vsi->back->hw;
struct ice_sw_recipe *recp_list;
struct list_head *fltr_rules;
recp_list = &hw->switch_info->recp_list[recipe_id];
if (!recp_list)
return ICE_ERR_DOES_NOT_EXIST;
fltr_rules = &recp_list->filt_rules;
regular_entry = ice_fltr_find_regular_entry(fltr_rules, rule_id);
if (regular_entry)
return ice_fltr_update_regular_rule(vsi, recipe_id,
regular_entry, new_flags);
adv_entry = ice_fltr_find_adv_entry(fltr_rules, rule_id);
if (adv_entry)
return ice_fltr_update_adv_rule_flags(vsi, recipe_id,
adv_entry, new_flags);
return ICE_ERR_DOES_NOT_EXIST;
}
/**
* ice_fltr_update_flags_dflt_rule - update flags on default rule
* @vsi: pointer to VSI
* @rule_id: id of rule
* @direction: Tx or Rx
* @new_flags: flags to update
*
* Function updates flags on default rule with ICE_SW_LKUP_DFLT.
*
* Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and
* ICE_SINGLE_ACT_LAN_ENABLE.
*/
enum ice_status
ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
u32 new_flags)
{
u32 action = ice_fltr_build_action(vsi->vsi_num);
struct ice_hw *hw = &vsi->back->hw;
return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action,
direction, vsi->vsi_num, new_flags);
}
@@ -7,54 +7,47 @@
 #include "ice_vlan.h"
 void ice_fltr_free_list(struct device *dev, struct list_head *h);
-enum ice_status
-ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask);
-enum ice_status
-ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask);
-enum ice_status
+int
+ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+			      u8 promisc_mask);
+int
+ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+				u8 promisc_mask);
+int
 ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
 			   u16 vid, u8 lport);
-enum ice_status
+int
 ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
 			 u16 vid, u8 lport);
 int
 ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
 			 const u8 *mac, enum ice_sw_fwd_act_type action);
-enum ice_status
+int
 ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
 		 enum ice_sw_fwd_act_type action);
-enum ice_status
+int
 ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
 			       enum ice_sw_fwd_act_type action);
-enum ice_status
-ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
-enum ice_status
+int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
+int
 ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
 		    enum ice_sw_fwd_act_type action);
-enum ice_status
-ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
-enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
-enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
-enum ice_status
+int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
+int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int
 ice_fltr_add_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id,
 		      enum ice_sw_fwd_act_type action);
-enum ice_status
+int
 ice_fltr_remove_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id,
 			 enum ice_sw_fwd_act_type action);
-enum ice_status
+int
 ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
 		 enum ice_sw_fwd_act_type action);
-enum ice_status
+int
 ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
 		    enum ice_sw_fwd_act_type action);
 void ice_fltr_remove_all(struct ice_vsi *vsi);
-enum ice_status
-ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id,
-		      u32 new_flags);
-enum ice_status
-ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
-				u32 new_flags);
 #endif
@@ -21,6 +21,18 @@ struct ice_fwu_priv {
 	/* Track which NVM banks to activate at the end of the update */
 	u8 activate_flags;
+
+	/* Track the firmware response of the required reset to complete the
+	 * flash update.
+	 *
+	 * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required
+	 * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required
+	 * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required
+	 */
+	u8 reset_level;
+
+	/* Track if EMP reset is available */
+	u8 emp_reset_available;
 };
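ice_finalize_update(), later in this diff, turns the cached reset_level into a user-facing devlink message. A standalone sketch of that mapping, with local stand-in macros for the three flag values listed in the comment above:

```c
#include <stdio.h>

#define NVM_POR_FLAG	0	/* full power-on reset required */
#define NVM_PERST_FLAG	1	/* cold PCIe reset required */
#define NVM_EMPR_FLAG	2	/* EMP reset required */

static const char *reset_msg(unsigned char reset_level)
{
	switch (reset_level) {
	case NVM_EMPR_FLAG:
		return "Activate new firmware by devlink reload";
	case NVM_PERST_FLAG:
		return "Activate new firmware by rebooting the system";
	case NVM_POR_FLAG:
	default:
		return "Activate new firmware by power cycling the system";
	}
}

int main(void)
{
	printf("%s\n", reset_msg(NVM_PERST_FLAG));
	return 0;
}
```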
 /**
@@ -45,8 +57,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
 	struct device *dev = context->dev;
 	struct ice_pf *pf = priv->pf;
 	struct ice_hw *hw = &pf->hw;
-	enum ice_status status;
 	u8 *package_data;
+	int status;

 	dev_dbg(dev, "Sending PLDM record package data to firmware\n");

@@ -59,9 +71,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
 	kfree(package_data);

 	if (status) {
-		dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n",
-			ice_stat_str(status),
-			ice_aq_str(hw->adminq.sq_last_status));
+		dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n",
+			status, ice_aq_str(hw->adminq.sq_last_status));
 		NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware");
 		return -EIO;
 	}
@@ -209,8 +220,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
 	struct device *dev = context->dev;
 	struct ice_pf *pf = priv->pf;
 	struct ice_hw *hw = &pf->hw;
-	enum ice_status status;
 	size_t length;
+	int status;

 	switch (component->identifier) {
 	case NVM_COMP_ID_OROM:
@@ -246,9 +257,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
 	kfree(comp_tbl);

 	if (status) {
-		dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n",
-			ice_stat_str(status),
-			ice_aq_str(hw->adminq.sq_last_status));
+		dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n",
+			status, ice_aq_str(hw->adminq.sq_last_status));
 		NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware");
 		return -EIO;
 	}
@@ -265,6 +275,7 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
  * @block_size: size of the block to write, up to 4k
  * @block: pointer to block of data to write
  * @last_cmd: whether this is the last command
+ * @reset_level: storage for reset level required
  * @extack: netlink extended ACK structure
  *
  * Write a block of data to a flash module, and await for the completion
@@ -272,18 +283,23 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
  *
  * Note this function assumes the caller has acquired the NVM resource.
  *
+ * On successful return, reset level indicates the device reset required to
+ * complete the update.
+ *
+ *   0 - ICE_AQC_NVM_POR_FLAG - A full power on is required
+ *   1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required
+ *   2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required
+ *
  * Returns: zero on success, or a negative error code on failure.
  */
-static int
-ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
-			u16 block_size, u8 *block, bool last_cmd,
-			struct netlink_ext_ack *extack)
+int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+			    u16 block_size, u8 *block, bool last_cmd,
+			    u8 *reset_level, struct netlink_ext_ack *extack)
 {
 	u16 completion_module, completion_retval;
 	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_rq_event_info event;
 	struct ice_hw *hw = &pf->hw;
-	enum ice_status status;
 	u32 completion_offset;
 	int err;
@@ -292,11 +308,11 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
 	dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
 		block_size, module, offset);

-	status = ice_aq_update_nvm(hw, module, offset, block_size, block,
-				   last_cmd, 0, NULL);
-	if (status) {
-		dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n",
-			module, block_size, offset, ice_stat_str(status),
+	err = ice_aq_update_nvm(hw, module, offset, block_size, block,
+				last_cmd, 0, NULL);
+	if (err) {
+		dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n",
+			module, block_size, offset, err,
 			ice_aq_str(hw->adminq.sq_last_status));
 		NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
 		return -EIO;
@@ -310,8 +326,9 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
 	 */
 	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15*HZ, &event);
 	if (err) {
-		dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n",
-			module, block_size, offset, err);
+		ice_dev_err_errno(dev, err,
+				  "Timed out while trying to flash module 0x%02x with block of size %u at offset %u",
+				  module, block_size, offset);
 		NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
 		return -EIO;
 	}
@@ -337,13 +354,31 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
 	}

 	if (completion_retval) {
-		dev_err(dev, "Firmware ailed to flash module 0x%02x with block of size %u at offset %u, err %s\n",
+		dev_err(dev, "Firmware failed to flash module 0x%02x with block of size %u at offset %u, err %s\n",
 			module, block_size, offset,
 			ice_aq_str((enum ice_aq_err)completion_retval));
 		NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module");
 		return -EIO;
 	}

+	/* For the last command to write the NVM bank, newer versions of
+	 * firmware indicate the required level of reset to complete
+	 * activation of firmware. If the firmware supports this, cache the
+	 * response for indicating to the user later. Otherwise, assume that
+	 * a full power cycle is required.
+	 */
+	if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) {
+		if (hw->dev_caps.common_cap.pcie_reset_avoidance) {
+			*reset_level = (event.desc.params.nvm.cmd_flags &
+					ICE_AQC_NVM_RESET_LVL_M);
+			dev_dbg(dev, "Firmware reported required reset level as %u\n",
+				*reset_level);
+		} else {
+			*reset_level = ICE_AQC_NVM_POR_FLAG;
+			dev_dbg(dev, "Firmware doesn't support indicating required reset level. Assuming a power cycle is required\n");
+		}
+	}
+
 	return 0;
 }
@@ -354,6 +389,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
  * @component: the name of the component being updated
  * @image: buffer of image data to write to the NVM
  * @length: length of the buffer
+ * @reset_level: storage for reset level required
  * @extack: netlink extended ACK structure
  *
  * Loop over the data for a given NVM module and program it in 4 Kb
@@ -366,7 +402,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
  */
 static int
 ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
-		     const u8 *image, u32 length,
+		     const u8 *image, u32 length, u8 *reset_level,
 		     struct netlink_ext_ack *extack)
 {
 	struct device *dev = ice_pf_to_dev(pf);
@@ -400,7 +436,8 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
 		memcpy(block, image + offset, block_size);

 		err = ice_write_one_nvm_block(pf, module, offset, block_size,
-					      block, last_cmd, extack);
+					      block, last_cmd, reset_level,
+					      extack);
 		if (err)
 			break;

@@ -423,6 +460,11 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
 	return err;
 }

+/* Length in seconds to wait before timing out when erasing a flash module.
+ * Yes, erasing really can take minutes to complete.
+ */
+#define ICE_FW_ERASE_TIMEOUT 300
 /**
  * ice_erase_nvm_module - Erase an NVM module and await firmware completion
  * @pf: the PF data structure
@@ -446,7 +488,6 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
 	struct ice_rq_event_info event;
 	struct ice_hw *hw = &pf->hw;
 	struct devlink *devlink;
-	enum ice_status status;
 	int err;

 	dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
@@ -455,20 +496,19 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,

 	devlink = priv_to_devlink(pf);

-	devlink_flash_update_status_notify(devlink, "Erasing", component, 0, 0);
+	devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT);

-	status = ice_aq_erase_nvm(hw, module, NULL);
-	if (status) {
-		dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n",
-			component, module, ice_stat_str(status),
+	err = ice_aq_erase_nvm(hw, module, NULL);
+	if (err) {
+		dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
+			component, module, err,
 			ice_aq_str(hw->adminq.sq_last_status));
 		NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module");
 		err = -EIO;
 		goto out_notify_devlink;
 	}

-	/* Yes, this really can take minutes to complete */
-	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, 300 * HZ, &event);
+	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, ICE_FW_ERASE_TIMEOUT * HZ, &event);
 	if (err) {
 		dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n",
 			component, module, err);
@@ -513,6 +553,7 @@ out_notify_devlink:
  * ice_switch_flash_banks - Tell firmware to switch NVM banks
  * @pf: Pointer to the PF data structure
  * @activate_flags: flags used for the activation command
+ * @emp_reset_available: on return, indicates if EMP reset is available
  * @extack: netlink extended ACK structure
  *
  * Notify firmware to activate the newly written flash banks, and wait for the
@@ -520,27 +561,44 @@ out_notify_devlink:
  *
  * Returns: zero on success or an error code on failure.
  */
-static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
-				  struct netlink_ext_ack *extack)
+static int
+ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
+		       u8 *emp_reset_available, struct netlink_ext_ack *extack)
 {
 	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_rq_event_info event;
 	struct ice_hw *hw = &pf->hw;
-	enum ice_status status;
 	u16 completion_retval;
+	u8 response_flags;
 	int err;

 	memset(&event, 0, sizeof(event));

-	status = ice_nvm_write_activate(hw, activate_flags);
-	if (status) {
-		dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n",
-			ice_stat_str(status),
-			ice_aq_str(hw->adminq.sq_last_status));
+	err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
+	if (err) {
+		dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n",
+			err, ice_aq_str(hw->adminq.sq_last_status));
 		NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks");
 		return -EIO;
 	}

+	/* Newer versions of firmware have support to indicate whether an EMP
+	 * reset to reload firmware is available. For older firmware, EMP
+	 * reset is always available.
+	 */
+	if (emp_reset_available) {
+		if (hw->dev_caps.common_cap.reset_restrict_support) {
+			*emp_reset_available =
+				response_flags & ICE_AQC_NVM_EMPR_ENA;
+			dev_dbg(dev, "Firmware indicated that EMP reset is %s\n",
+				*emp_reset_available ?
+				"available" : "not available");
+		} else {
+			*emp_reset_available = ICE_AQC_NVM_EMPR_ENA;
+			dev_dbg(dev, "Firmware does not support restricting EMP reset availability\n");
+		}
+	}
+
 	/* In most cases, we expect firmware to respond with a completion
 	 * within a few milliseconds. However, it has been observed in
 	 * practice that firmware may sometimes take longer. The wait time
@@ -550,8 +608,8 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
 	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30*HZ,
 				    &event);
 	if (err) {
-		dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n",
-			err);
+		ice_dev_err_errno(dev, err,
+				  "Timed out waiting for firmware to switch active flash banks");
 		NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
 		return err;
 	}
@@ -587,6 +645,7 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
	struct netlink_ext_ack *extack = priv->extack;
	struct ice_pf *pf = priv->pf;
	const char *name;
	u8 *reset_level;
	u16 module;
	u8 flag;
	int err;
@@ -595,16 +654,19 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
	case NVM_COMP_ID_OROM:
		module = ICE_SR_1ST_OROM_BANK_PTR;
		flag = ICE_AQC_NVM_ACTIV_SEL_OROM;
		reset_level = NULL;
		name = "fw.undi";
		break;
	case NVM_COMP_ID_NVM:
		module = ICE_SR_1ST_NVM_BANK_PTR;
		flag = ICE_AQC_NVM_ACTIV_SEL_NVM;
		reset_level = &priv->reset_level;
		name = "fw.mgmt";
		break;
	case NVM_COMP_ID_NETLIST:
		module = ICE_SR_NETLIST_BANK_PTR;
		flag = ICE_AQC_NVM_ACTIV_SEL_NETLIST;
		reset_level = NULL;
		name = "fw.netlist";
		break;
	default:
@@ -624,7 +686,8 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
		return err;

	return ice_write_nvm_module(pf, module, name, component->component_data,
				    component->component_size, reset_level,
				    extack);
}
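
The switch above maps each PLDM component ID to a flash module pointer, an activation flag, and (for fw.mgmt only) a place to record the required reset level. The same mapping can also be written table-driven; the following is an illustrative, standalone C sketch only, with placeholder values standing in for the real NVM_COMP_ID_* and ICE_* constants:

/* Illustrative, standalone sketch of a table-driven component map.
 * All numeric values below are placeholders, not driver constants.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct comp_map {
	uint16_t id;        /* PLDM component identifier */
	uint16_t module;    /* flash bank pointer word */
	uint8_t  flag;      /* activation select flag */
	bool     has_reset; /* records a reset level (fw.mgmt only) */
	const char *name;
};

static const struct comp_map comp_maps[] = {
	{ 0x01, 0x3D, 0x02, false, "fw.undi"    },
	{ 0x06, 0x42, 0x01, true,  "fw.mgmt"    },
	{ 0x08, 0x46, 0x04, false, "fw.netlist" },
};

static const struct comp_map *comp_lookup(uint16_t id)
{
	for (size_t i = 0; i < sizeof(comp_maps) / sizeof(comp_maps[0]); i++)
		if (comp_maps[i].id == id)
			return &comp_maps[i];
	return NULL; /* unknown component */
}
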
/**
@@ -642,33 +705,130 @@ static int ice_finalize_update(struct pldmfw *context)
	struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
	struct netlink_ext_ack *extack = priv->extack;
	struct ice_pf *pf = priv->pf;
	struct devlink *devlink;
	int err;

	/* Finally, notify firmware to activate the written NVM banks */
	err = ice_switch_flash_banks(pf, priv->activate_flags,
				     &priv->emp_reset_available, extack);
	if (err)
		return err;

	devlink = priv_to_devlink(pf);

	/* If the required reset is EMPR, but EMPR is disabled, report that
	 * a reboot is required instead.
	 */
	if (priv->reset_level == ICE_AQC_NVM_EMPR_FLAG &&
	    !priv->emp_reset_available) {
		dev_dbg(ice_pf_to_dev(pf), "Firmware indicated EMP reset as sufficient, but EMP reset is disabled\n");
		priv->reset_level = ICE_AQC_NVM_PERST_FLAG;
	}

	switch (priv->reset_level) {
	case ICE_AQC_NVM_EMPR_FLAG:
		devlink_flash_update_status_notify(devlink,
						   "Activate new firmware by devlink reload",
						   NULL, 0, 0);
		break;
	case ICE_AQC_NVM_PERST_FLAG:
		devlink_flash_update_status_notify(devlink,
						   "Activate new firmware by rebooting the system",
						   NULL, 0, 0);
		break;
	case ICE_AQC_NVM_POR_FLAG:
	default:
		devlink_flash_update_status_notify(devlink,
						   "Activate new firmware by power cycling the system",
						   NULL, 0, 0);
		break;
	}

	pf->fw_emp_reset_disabled = !priv->emp_reset_available;

	return 0;
}
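
The activation message above depends only on the reset level and on whether EMP reset is available, so the mapping can be factored into a pure function for testing. A minimal standalone sketch, with placeholder flag values standing in for the ICE_AQC_NVM_*_FLAG constants:

/* Standalone sketch; the enum values are placeholders for the real
 * ICE_AQC_NVM_EMPR_FLAG/PERST_FLAG/POR_FLAG constants.
 */
enum reset_level { EMPR = 1, PERST = 2, POR = 4 };

static const char *activation_msg(enum reset_level level, int emp_available)
{
	/* EMPR downgrades to PERST when firmware restricts EMP reset */
	if (level == EMPR && !emp_available)
		level = PERST;

	switch (level) {
	case EMPR:
		return "Activate new firmware by devlink reload";
	case PERST:
		return "Activate new firmware by rebooting the system";
	case POR:
	default:
		return "Activate new firmware by power cycling the system";
	}
}
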
/* these are u32 so that we can store PCI_ANY_ID */
struct ice_pldm_pci_record_id {
	int vendor;
	int device;
	int subsystem_vendor;
	int subsystem_device;
};
/**
 * ice_op_pci_match_record - Check if a PCI device matches the record
 * @context: PLDM fw update structure
 * @record: list of records extracted from the PLDM image
 *
 * Determine if the PCI device associated with this device matches the record
 * data provided.
 *
 * Searches the descriptor TLVs and extracts the relevant descriptor data into
 * a pldm_pci_record_id. This is then compared against the PCI device ID
 * information.
 *
 * Returns: true if the device matches the record, false otherwise.
 */
static bool ice_op_pci_match_record(struct pldmfw *context,
				    struct pldmfw_record *record)
{
	struct pci_dev *pdev = to_pci_dev(context->dev);
	struct ice_pldm_pci_record_id id = {
		.vendor = PCI_ANY_ID,
		.device = PCI_ANY_ID,
		.subsystem_vendor = PCI_ANY_ID,
		.subsystem_device = PCI_ANY_ID,
	};
	struct pldmfw_desc_tlv *desc;

	list_for_each_entry(desc, &record->descs, entry) {
		u16 value;
		int *ptr;

		switch (desc->type) {
		case PLDM_DESC_ID_PCI_VENDOR_ID:
			ptr = &id.vendor;
			break;
		case PLDM_DESC_ID_PCI_DEVICE_ID:
			ptr = &id.device;
			break;
		case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
			ptr = &id.subsystem_vendor;
			break;
		case PLDM_DESC_ID_PCI_SUBDEV_ID:
			ptr = &id.subsystem_device;
			break;
		default:
			/* Skip unrelated TLVs */
			continue;
		}

		value = get_unaligned_le16(desc->data);
		/* A value of zero for one of the descriptors is sometimes
		 * used when the record should ignore this field when matching
		 * the device. For example, if the record applies to any
		 * subsystem device or vendor.
		 */
		if (value)
			*ptr = (int)value;
		else
			*ptr = PCI_ANY_ID;
	}

	/* the E822 device can have a generic device ID so check for that */
	if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) &&
	    (id.device == PCI_ANY_ID || id.device == pdev->device ||
	     id.device == ICE_DEV_ID_E822_SI_DFLT) &&
	    (id.subsystem_vendor == PCI_ANY_ID ||
	     id.subsystem_vendor == pdev->subsystem_vendor) &&
	    (id.subsystem_device == PCI_ANY_ID ||
	     id.subsystem_device == pdev->subsystem_device))
		return true;
	else
		return false;
}
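
The matching rule above treats PCI_ANY_ID as a wildcard on each of the four ID fields, with one extra escape hatch for the generic E822 device ID. The per-field rule in isolation, as a standalone, runnable C sketch (ANY_ID is a stand-in for PCI_ANY_ID):

/* Standalone illustration of the per-field wildcard rule used above. */
#include <assert.h>

#define ANY_ID (-1) /* plays the role of PCI_ANY_ID */

static int field_matches(int record_val, int dev_val)
{
	return record_val == ANY_ID || record_val == dev_val;
}

int main(void)
{
	assert(field_matches(ANY_ID, 0x8086));  /* wildcard matches anything */
	assert(field_matches(0x8086, 0x8086)); /* exact match */
	assert(!field_matches(0x8086, 0x1234)); /* mismatch */
	return 0;
}
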
static const struct pldmfw_ops ice_fwu_ops_e810 = {
	.match_record = &pldmfw_op_pci_match_record,
	.send_package_data = &ice_send_package_data,
	.send_component_table = &ice_send_component_table,
@@ -676,97 +836,31 @@ static const struct pldmfw_ops ice_fwu_ops = {
	.finalize_update = &ice_finalize_update,
};
static const struct pldmfw_ops ice_fwu_ops_e822 = {
	.match_record = &ice_op_pci_match_record,
	.send_package_data = &ice_send_package_data,
	.send_component_table = &ice_send_component_table,
	.flash_component = &ice_flash_component,
	.finalize_update = &ice_finalize_update,
};
/**
 * ice_get_pending_updates - Check if the component has a pending update
 * @pf: the PF driver structure
 * @pending: on return, bitmap of updates pending
 * @extack: Netlink extended ACK
 *
 * Check if the device has any pending updates on any flash components.
 *
 * Returns: zero on success, or a negative error code on failure. Updates
 * pending with the bitmap of pending updates.
 */
int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
			    struct netlink_ext_ack *extack)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw_dev_caps *dev_caps;
	struct ice_hw *hw = &pf->hw;
	int err;

	dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL);
@@ -778,30 +872,59 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
	 * may have changed, e.g. if an update was previously completed and
	 * the system has not yet rebooted.
	 */
	err = ice_discover_dev_caps(hw, dev_caps);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities");
		kfree(dev_caps);
		return err;
	}

	*pending = 0;

	if (dev_caps->common_cap.nvm_update_pending_nvm) {
		dev_info(dev, "The fw.mgmt flash component has a pending update\n");
		*pending |= ICE_AQC_NVM_ACTIV_SEL_NVM;
	}

	if (dev_caps->common_cap.nvm_update_pending_orom) {
		dev_info(dev, "The fw.undi flash component has a pending update\n");
		*pending |= ICE_AQC_NVM_ACTIV_SEL_OROM;
	}

	if (dev_caps->common_cap.nvm_update_pending_netlist) {
		dev_info(dev, "The fw.netlist flash component has a pending update\n");
		*pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
	}

	kfree(dev_caps);

	return 0;
}
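
Since the pending state comes back as a small bitmap, a caller can decode it into component names. A standalone C sketch (the bit values are placeholders for the real ICE_AQC_NVM_ACTIV_SEL_* flags):

/* Standalone sketch of decoding the pending-update bitmap.
 * Bit values below are placeholders for ICE_AQC_NVM_ACTIV_SEL_*.
 */
#include <stdio.h>

#define PENDING_NVM     0x01
#define PENDING_OROM    0x02
#define PENDING_NETLIST 0x04

static void print_pending(unsigned char pending)
{
	if (pending & PENDING_NVM)
		printf("fw.mgmt update pending\n");
	if (pending & PENDING_OROM)
		printf("fw.undi update pending\n");
	if (pending & PENDING_NETLIST)
		printf("fw.netlist update pending\n");
	if (!pending)
		printf("no updates pending\n");
}
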
/**
 * ice_cancel_pending_update - Cancel any pending update for a component
 * @pf: the PF driver structure
 * @component: if not NULL, the name of the component being updated
 * @extack: Netlink extended ACK structure
 *
 * Cancel any pending update for the specified component. If component is
 * NULL, all device updates will be canceled.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
static int
ice_cancel_pending_update(struct ice_pf *pf, const char *component,
			  struct netlink_ext_ack *extack)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct ice_hw *hw = &pf->hw;
	u8 pending;
	int err;

	err = ice_get_pending_updates(pf, &pending, extack);
	if (err)
		return err;
	/* If the flash_update request is for a specific component, ignore all
	 * of the other components.
	 */
@@ -827,17 +950,141 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
					   "Canceling previous pending update",
					   component, 0, 0);

	err = ice_acquire_nvm(hw, ICE_RES_WRITE);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to acquire device flash lock, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
		return err;
	}

	pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV;
	err = ice_switch_flash_banks(pf, pending, NULL, extack);

	ice_release_nvm(hw);

	/* Since we've canceled the pending update, we no longer know if EMP
	 * reset is restricted.
	 */
	pf->fw_emp_reset_disabled = false;

	return err;
}
/**
 * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device
 * @devlink: pointer to devlink associated with the device to update
 * @params: devlink flash update parameters
 * @extack: netlink extended ACK structure
 *
 * Parse the data for a given firmware file, verifying that it is a valid PLDM
 * formatted image that matches this device.
 *
 * Extract the device record Package Data and Component Tables and send them
 * to the firmware. Extract and write the flash data for each of the three
 * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
 * firmware once the data is written to the inactive banks.
 *
 * Returns: zero on success or a negative error code on failure.
 */
int ice_flash_pldm_image(struct devlink *devlink,
			 struct devlink_flash_update_params *params,
			 struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW
	const struct firmware *fw;
#endif
	struct ice_fwu_priv priv;
	u8 preservation;
	int err;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_REC) {
		/* The devlink flash update process does not currently support
		 * updating when in recovery mode.
		 */
		NL_SET_ERR_MSG_MOD(extack, "Device firmware is in recovery mode. Unable to perform flash update.");
		return -EOPNOTSUPP;
	}

	switch (params->overwrite_mask) {
	case 0:
		/* preserve all settings and identifiers */
		preservation = ICE_AQC_NVM_PRESERVE_ALL;
		break;
	case DEVLINK_FLASH_OVERWRITE_SETTINGS:
		/* overwrite settings, but preserve vital information such as
		 * device identifiers.
		 */
		preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
		break;
	case (DEVLINK_FLASH_OVERWRITE_SETTINGS |
	      DEVLINK_FLASH_OVERWRITE_IDENTIFIERS):
		/* overwrite both settings and identifiers, preserve nothing */
		preservation = ICE_AQC_NVM_NO_PRESERVATION;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
		return -EOPNOTSUPP;
	}

	if (!hw->dev_caps.common_cap.nvm_unified_update) {
		NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
		return -EOPNOTSUPP;
	}

	memset(&priv, 0, sizeof(priv));

	/* the E822 device needs a slightly different ops */
	if (hw->mac_type == ICE_MAC_GENERIC)
		priv.context.ops = &ice_fwu_ops_e822;
	else
		priv.context.ops = &ice_fwu_ops_e810;
	priv.context.dev = dev;
	priv.extack = extack;
	priv.pf = pf;
	priv.activate_flags = preservation;

	devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);

	err = ice_cancel_pending_update(pf, NULL, extack);
	if (err)
		return err;

	err = ice_acquire_nvm(hw, ICE_RES_WRITE);
	if (err) {
		dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
		return err;
	}

#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW
	err = pldmfw_flash_image(&priv.context, params->fw);
#else
	err = request_firmware(&fw, params->file_name, dev);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk");
		ice_release_nvm(hw);
		return err;
	}

	err = pldmfw_flash_image(&priv.context, fw);

	release_firmware(fw);
#endif
	if (err == -ENOENT) {
		dev_err(dev, "Firmware image has no record matching this device\n");
		NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device");
	} else if (err) {
		/* Do not set a generic extended ACK message here. A more
		 * specific message may already have been set by one of our
		 * ops.
		 */
		dev_err(dev, "Failed to flash PLDM image, err %d", err);
	}

	ice_release_nvm(hw);

drivers/thirdparty/ice/ice_fw_update.h

@@ -4,9 +4,13 @@
#ifndef _ICE_FW_UPDATE_H_
#define _ICE_FW_UPDATE_H_

int ice_flash_pldm_image(struct devlink *devlink,
			 struct devlink_flash_update_params *params,
			 struct netlink_ext_ack *extack);
int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
			    struct netlink_ext_ack *extack);
int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
			    u16 block_size, u8 *block, bool last_cmd,
			    u8 *reset_level, struct netlink_ext_ack *extack);

#endif

drivers/thirdparty/ice/ice_fwlog.c

@@ -15,7 +15,7 @@ static void cache_cfg(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
}

/**
 * valid_module_entries - validate all the module entry IDs and log levels
 * @hw: pointer to the HW structure
 * @entries: entries to validate
 * @num_entries: number of entries to validate
@@ -92,11 +92,11 @@ static bool valid_cfg(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
 * ice_init_hw(). Firmware logging will be configured based on these settings
 * and also the PF will be registered on init.
 */
int
ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
	if (!valid_cfg(hw, cfg))
		return -EINVAL;

	cache_cfg(hw, cfg);
@@ -111,20 +111,20 @@ ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
 * @options: options from ice_fwlog_cfg->options structure
 * @log_resolution: logging resolution
 */
static int
ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
		 u16 num_entries, u16 options, u16 log_resolution)
{
	struct ice_aqc_fw_log_cfg_resp *fw_modules;
	struct ice_aqc_fw_log *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 i;

	fw_modules = devm_kcalloc(ice_hw_to_dev(hw), num_entries,
				  sizeof(*fw_modules), GFP_KERNEL);
	if (!fw_modules)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++) {
		fw_modules[i].module_identifier =
@@ -180,16 +180,16 @@ bool ice_fwlog_supported(struct ice_hw *hw)
 * ice_fwlog_register. Note, that ice_fwlog_register does not need to be called
 * for init.
 */
int
ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
	int status;

	if (!ice_fwlog_supported(hw))
		return -EOPNOTSUPP;

	if (!valid_cfg(hw, cfg))
		return -EINVAL;

	status = ice_aq_fwlog_set(hw, cfg->module_entries,
				  ICE_AQC_FW_LOG_ID_MAX, cfg->options,
@@ -240,23 +240,23 @@ update_cached_entries(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
 * Only the entries passed in will be affected. All other firmware logging
 * settings will be unaffected.
 */
int
ice_fwlog_update_modules(struct ice_hw *hw,
			 struct ice_fwlog_module_entry *entries,
			 u16 num_entries)
{
	struct ice_fwlog_cfg *cfg;
	int status;

	if (!ice_fwlog_supported(hw))
		return -EOPNOTSUPP;

	if (!valid_module_entries(hw, entries, num_entries))
		return -EINVAL;

	cfg = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	status = ice_fwlog_get(hw, cfg);
	if (status)
@@ -273,11 +273,11 @@ status_out:
}

/**
 * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
 * @hw: pointer to the HW structure
 * @reg: true to register and false to unregister
 */
static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
{
	struct ice_aq_desc desc;
@@ -296,12 +296,12 @@ static enum ice_status ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
 * After this call the PF will start to receive firmware logging based on the
 * configuration set in ice_fwlog_set.
 */
int ice_fwlog_register(struct ice_hw *hw)
{
	int status;

	if (!ice_fwlog_supported(hw))
		return -EOPNOTSUPP;

	status = ice_aq_fwlog_register(hw, true);
	if (status)
@@ -316,12 +316,12 @@ enum ice_status ice_fwlog_register(struct ice_hw *hw)
 * ice_fwlog_unregister - Unregister the PF from firmware logging
 * @hw: pointer to the HW structure
 */
int ice_fwlog_unregister(struct ice_hw *hw)
{
	int status;

	if (!ice_fwlog_supported(hw))
		return -EOPNOTSUPP;

	status = ice_aq_fwlog_register(hw, false);
	if (status)
@@ -337,14 +337,14 @@ enum ice_status ice_fwlog_unregister(struct ice_hw *hw)
 * @hw: pointer to the HW structure
 * @cfg: firmware logging configuration to populate
 */
static int
ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
	struct ice_aqc_fw_log_cfg_resp *fw_modules;
	struct ice_aqc_fw_log *cmd;
	struct ice_aq_desc desc;
	u16 i, module_id_cnt;
	int status;
	void *buf;

	memset(cfg, 0, sizeof(*cfg));
@@ -352,7 +352,7 @@ ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
	buf = devm_kcalloc(ice_hw_to_dev(hw), 1, ICE_AQ_MAX_BUF_LEN,
			   GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query);
	cmd = &desc.params.fw_log;
@@ -411,7 +411,7 @@ status_out:
void ice_fwlog_set_support_ena(struct ice_hw *hw)
{
	struct ice_fwlog_cfg *cfg;
	int status;

	hw->fwlog_support_ena = false;
@@ -438,16 +438,16 @@ void ice_fwlog_set_support_ena(struct ice_hw *hw)
 * @hw: pointer to the HW structure
 * @cfg: config to populate based on current firmware logging settings
 */
int
ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
	int status;

	if (!ice_fwlog_supported(hw))
		return -EOPNOTSUPP;

	if (!cfg)
		return -EINVAL;

	status = ice_aq_fwlog_get(hw, cfg);
	if (status)

drivers/thirdparty/ice/ice_fwlog.h

@@ -43,20 +43,20 @@ struct ice_fwlog_cfg {
	/* options used to configure firmware logging */
	u16 options;
	/* minimum number of log events sent per Admin Receive Queue event */
	u16 log_resolution;
};

void ice_fwlog_set_support_ena(struct ice_hw *hw);
bool ice_fwlog_supported(struct ice_hw *hw);
int ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
int
ice_fwlog_update_modules(struct ice_hw *hw,
			 struct ice_fwlog_module_entry *entries,
			 u16 num_entries);
int ice_fwlog_register(struct ice_hw *hw);
int ice_fwlog_unregister(struct ice_hw *hw);
void
ice_fwlog_event_dump(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
#endif /* _ICE_FWLOG_H_ */

drivers/thirdparty/ice/ice_gnss.c (new file)

@@ -0,0 +1,575 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice.h"
#include "ice_lib.h"
/**
* ice_gnss_do_write - Write data to internal GNSS
* @pf: board private structure
* @buf: command buffer
* @size: command buffer size
*
* Write UBX command data to the GNSS receiver
*/
static unsigned int
ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size)
{
struct ice_aqc_link_topo_addr link_topo;
struct ice_hw *hw = &pf->hw;
unsigned int offset = 0;
int err;
memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr));
link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS;
link_topo.topo_params.node_type_ctx |=
ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE <<
ICE_AQC_LINK_TOPO_NODE_CTX_S;
/* It's not possible to write a single byte to u-blox.
* Write all bytes in a loop until there are 6 or less bytes left. If
* there are exactly 6 bytes left, the last write would be only a byte.
* In this case, do 4+2 bytes writes instead of 5+1. Otherwise, do the
* last 2 to 5 bytes write.
*/
while (size - offset > ICE_GNSS_UBX_WRITE_BYTES + 1) {
err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
cpu_to_le16(buf[offset]),
ICE_MAX_I2C_WRITE_BYTES,
&buf[offset + 1], NULL);
if (err)
goto exit;
offset += ICE_GNSS_UBX_WRITE_BYTES;
}
/* Single byte would be written. Write 4 bytes instead of 5. */
if (size - offset == ICE_GNSS_UBX_WRITE_BYTES + 1) {
err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
cpu_to_le16(buf[offset]),
ICE_MAX_I2C_WRITE_BYTES - 1,
&buf[offset + 1], NULL);
if (err)
goto exit;
offset += ICE_GNSS_UBX_WRITE_BYTES - 1;
}
/* Do the last write, 2 to 5 bytes. */
err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
cpu_to_le16(buf[offset]), size - offset - 1,
&buf[offset + 1], NULL);
if (!err)
offset = size;
exit:
if (err)
dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, status=%d\n",
offset, size, err);
return offset;
}
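
The 4+2 split in the write loop above exists because each AQ I2C write carries one address byte plus at most four data bytes, and the receiver cannot take a single-byte write. The chunking rule can be checked in isolation; a standalone C sketch, where CHUNK mirrors ICE_GNSS_UBX_WRITE_BYTES:

/* Standalone check of the 5 / 4+2 chunking rule from the write loop above.
 * CHUNK mirrors ICE_GNSS_UBX_WRITE_BYTES (1 address byte + 4 data bytes).
 */
#include <stdio.h>

#define CHUNK 5

static void plan_writes(unsigned int size)
{
	unsigned int offset = 0;

	while (size - offset > CHUNK + 1) { /* full 5-byte writes */
		printf("write %d bytes\n", CHUNK);
		offset += CHUNK;
	}
	if (size - offset == CHUNK + 1) { /* avoid a trailing 1-byte write */
		printf("write %d bytes\n", CHUNK - 1);
		offset += CHUNK - 1;
	}
	printf("write %u bytes\n", size - offset); /* final 2..5 bytes */
}

int main(void)
{
	plan_writes(11); /* prints 5, 4, 2 -- never a single-byte tail */
	return 0;
}
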
/**
* ice_gnss_write_pending - Write all pending data to internal GNSS
* @work: GNSS write work structure
*/
static void ice_gnss_write_pending(struct kthread_work *work)
{
struct gnss_serial *gnss = container_of(work, struct gnss_serial,
write_work);
struct ice_pf *pf = gnss->back;
if (!list_empty(&gnss->queue)) {
struct gnss_write_buf *write_buf = NULL;
unsigned int bytes;
write_buf = list_first_entry(&gnss->queue,
struct gnss_write_buf, queue);
bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size);
dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes);
list_del(&write_buf->queue);
kfree(write_buf->buf);
kfree(write_buf);
}
}
/**
* ice_gnss_read - Read data from internal GNSS module
* @work: GNSS read work structure
*
* Read the data from internal GNSS receiver, number of bytes read will be
* returned in *read_data parameter.
*/
static void ice_gnss_read(struct kthread_work *work)
{
struct gnss_serial *gnss = container_of(work, struct gnss_serial,
read_work.work);
struct ice_aqc_link_topo_addr link_topo;
unsigned int i, bytes_read, data_len;
struct tty_port *port;
struct ice_pf *pf;
struct ice_hw *hw;
__be16 data_len_b;
char *buf = NULL;
u8 i2c_params;
int err = 0;
pf = gnss->back;
if (!pf || !gnss->tty || !gnss->tty->port)
return;
hw = &pf->hw;
port = gnss->tty->port;
buf = (char *)get_zeroed_page(GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto exit;
}
memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr));
link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS;
link_topo.topo_params.node_type_ctx |=
ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE <<
ICE_AQC_LINK_TOPO_NODE_CTX_S;
i2c_params = ICE_GNSS_UBX_DATA_LEN_WIDTH |
ICE_AQC_I2C_USE_REPEATED_START;
/* Read data length in a loop, when it's not 0 the data is ready */
for (i = 0; i < ICE_MAX_UBX_READ_TRIES; i++) {
err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
cpu_to_le16(ICE_GNSS_UBX_DATA_LEN_H),
i2c_params, (u8 *)&data_len_b, NULL);
if (err)
goto exit_buf;
data_len = be16_to_cpu(data_len_b);
if (data_len != 0 && data_len != U16_MAX)
break;
msleep(20);
}
data_len = min_t(typeof(data_len), data_len, PAGE_SIZE);
data_len = tty_buffer_request_room(port, data_len);
if (!data_len) {
err = -ENOMEM;
goto exit_buf;
}
/* Read received data */
for (i = 0; i < data_len; i += bytes_read) {
unsigned int bytes_left = data_len - i;
bytes_read = min_t(typeof(bytes_left), bytes_left,
ICE_MAX_I2C_DATA_SIZE);
err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA),
bytes_read, &buf[i], NULL);
if (err)
goto exit_buf;
}
/* Send the data to the tty layer for users to read. This doesn't
* actually push the data through unless tty->low_latency is set.
*/
tty_insert_flip_string(port, buf, i);
tty_flip_buffer_push(port);
exit_buf:
free_page((unsigned long)buf);
kthread_queue_delayed_work(gnss->kworker, &gnss->read_work,
ICE_GNSS_TIMER_DELAY_TIME);
exit:
if (err)
dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err);
}
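
The read path above polls the u-blox data-length registers, which report the number of buffered bytes big-endian, and treats both 0 and 0xFFFF as "not ready". That decode step in isolation, as a standalone C sketch:

/* Standalone sketch of the u-blox length-register decode used above:
 * the two length bytes are big-endian; 0 and 0xFFFF mean "no data yet".
 */
#include <stdint.h>

static int ubx_data_len(uint8_t hi, uint8_t lo, uint16_t *len)
{
	uint16_t v = ((uint16_t)hi << 8) | lo; /* big-endian to host */

	if (v == 0 || v == UINT16_MAX)
		return 0; /* not ready yet, poll again */
	*len = v;
	return 1;
}
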
/**
* ice_gnss_struct_init - Initialize GNSS structure for the TTY
* @pf: Board private structure
* @index: TTY device index
*/
static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index)
{
struct device *dev = ice_pf_to_dev(pf);
struct kthread_worker *kworker;
struct gnss_serial *gnss;
gnss = kzalloc(sizeof(*gnss), GFP_KERNEL);
if (!gnss)
return NULL;
mutex_init(&gnss->gnss_mutex);
gnss->open_count = 0;
gnss->back = pf;
pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
INIT_LIST_HEAD(&gnss->queue);
kthread_init_work(&gnss->write_work, ice_gnss_write_pending);
/* Allocate a kworker for handling work required for the GNSS TTY
* writes.
*/
kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
return NULL;
}
gnss->kworker = kworker;
return gnss;
}
/**
* ice_gnss_tty_open - Initialize GNSS structures on TTY device open
* @tty: pointer to the tty_struct
* @filp: pointer to the file
*
* This routine is mandatory. If this routine is not filled in, the attempted
* open will fail with ENODEV.
*/
static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp)
{
struct gnss_serial *gnss;
struct ice_pf *pf;
pf = (struct ice_pf *)tty->driver->driver_state;
if (!pf)
return -EFAULT;
/* Clear the pointer in case something fails */
tty->driver_data = NULL;
/* Get the serial object associated with this tty pointer */
gnss = pf->gnss_serial;
if (!gnss) {
/* Initialize GNSS struct on the first device open */
gnss = ice_gnss_struct_init(pf, tty->index);
if (!gnss)
return -ENOMEM;
}
mutex_lock(&gnss->gnss_mutex);
/* Save our structure within the tty structure */
tty->driver_data = gnss;
gnss->tty = tty;
gnss->open_count++;
kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, 0);
mutex_unlock(&gnss->gnss_mutex);
return 0;
}
/**
* ice_gnss_tty_close - Cleanup GNSS structures on tty device close
* @tty: pointer to the tty_struct
* @filp: pointer to the file
*/
static void ice_gnss_tty_close(struct tty_struct *tty, struct file *filp)
{
struct gnss_serial *gnss = tty->driver_data;
struct ice_pf *pf;
if (!gnss)
return;
pf = (struct ice_pf *)tty->driver->driver_state;
if (!pf)
return;
mutex_lock(&gnss->gnss_mutex);
if (!gnss->open_count) {
/* Port was never opened */
dev_err(ice_pf_to_dev(pf), "GNSS port not opened\n");
goto exit;
}
gnss->open_count--;
if (gnss->open_count <= 0) {
/* Port is in shutdown state */
kthread_cancel_delayed_work_sync(&gnss->read_work);
}
exit:
mutex_unlock(&gnss->gnss_mutex);
}
/**
* ice_gnss_tty_write - Write GNSS data
* @tty: pointer to the tty_struct
* @buf: pointer to the user data
* @count: the number of characters to write to the hardware
*
* The write function call is called by the user when there is data to be sent
* to the hardware. First the tty core receives the call, and then it passes the
* data on to the tty driver's write function. The tty core also tells the tty
* driver the size of the data being sent.
* If any errors happen during the write call, a negative error value should be
* returned instead of the number of characters that were written.
*/
static int
ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
struct gnss_write_buf *write_buf;
struct gnss_serial *gnss;
unsigned char *cmd_buf;
struct ice_pf *pf;
int err = count;
/* We cannot write a single byte using our I2C implementation. */
if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)
return -EINVAL;
gnss = tty->driver_data;
if (!gnss)
return -EFAULT;
pf = (struct ice_pf *)tty->driver->driver_state;
if (!pf)
return -EFAULT;
mutex_lock(&gnss->gnss_mutex);
if (!gnss->open_count) {
err = -EINVAL;
goto exit;
}
cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
if (!cmd_buf) {
err = -ENOMEM;
goto exit;
}
memcpy(cmd_buf, buf, count);
/* Send the data out to a hardware port */
write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
if (!write_buf) {
kfree(cmd_buf);
err = -ENOMEM;
goto exit;
}
write_buf->buf = cmd_buf;
write_buf->size = count;
INIT_LIST_HEAD(&write_buf->queue);
list_add_tail(&write_buf->queue, &gnss->queue);
kthread_queue_work(gnss->kworker, &gnss->write_work);
exit:
mutex_unlock(&gnss->gnss_mutex);
return err;
}
/**
* ice_gnss_tty_write_room - Returns the number of characters to be written.
* @tty: pointer to the tty_struct
*
* This routine returns the number of characters the tty driver will accept
* for queuing to be written. This number is subject to change as output
* buffers get emptied, or if output flow control is in effect.
*/
#ifdef HAVE_TTY_WRITE_ROOM_UINT
static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty)
#else
static int ice_gnss_tty_write_room(struct tty_struct *tty)
#endif /* !HAVE_TTY_WRITE_ROOM_UINT */
{
struct gnss_serial *gnss = tty->driver_data;
if (!gnss)
#ifndef HAVE_TTY_WRITE_ROOM_UINT
return 0;
#else
return -EFAULT;
#endif /* !HAVE_TTY_WRITE_ROOM_UINT */
mutex_lock(&gnss->gnss_mutex);
if (!gnss->open_count) {
mutex_unlock(&gnss->gnss_mutex);
#ifndef HAVE_TTY_WRITE_ROOM_UINT
return 0;
#else
return -EFAULT;
#endif /* !HAVE_TTY_WRITE_ROOM_UINT */
}
mutex_unlock(&gnss->gnss_mutex);
return ICE_GNSS_TTY_WRITE_BUF;
}
/**
* ice_gnss_tty_set_termios - mock for set_termios tty operations
* @tty: pointer to the tty_struct
* @new_termios: pointer to the new termios parameters
*/
static void
ice_gnss_tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
{
/*
* Some 3rd party tools (ex. ubxtool) want to change the TTY parameters.
* In our virtual interface (I2C communication over FW AQ) we don't have
* to change anything, but we need to implement it to unblock tools.
*/
}
static const struct tty_operations tty_gps_ops = {
.open = ice_gnss_tty_open,
.close = ice_gnss_tty_close,
.write = ice_gnss_tty_write,
.write_room = ice_gnss_tty_write_room,
.set_termios = ice_gnss_tty_set_termios,
};
/**
* ice_gnss_create_tty_driver - Create a TTY driver for GNSS
* @pf: Board private structure
*/
static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
const int ICE_TTYDRV_NAME_MAX = 12;
struct tty_driver *tty_driver;
char *ttydrv_name;
int err;
tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
if (IS_ERR(tty_driver)) {
dev_err(dev, "Failed to allocate memory for GNSS TTY\n");
return NULL;
}
ttydrv_name = kzalloc(ICE_TTYDRV_NAME_MAX, GFP_KERNEL);
if (!ttydrv_name) {
tty_driver_kref_put(tty_driver);
return NULL;
}
snprintf(ttydrv_name, ICE_TTYDRV_NAME_MAX, "ttyGNSS_%02x%02x",
(u8)pf->pdev->bus->number, (u8)PCI_SLOT(pf->pdev->devfn));
/* Initialize the tty driver */
tty_driver->owner = THIS_MODULE;
tty_driver->driver_name = dev_driver_string(dev);
tty_driver->name = (const char *)ttydrv_name;
tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
tty_driver->subtype = SERIAL_TYPE_NORMAL;
tty_driver->init_termios = tty_std_termios;
tty_driver->init_termios.c_iflag &= ~INLCR;
tty_driver->init_termios.c_iflag |= IGNCR;
tty_driver->init_termios.c_oflag &= ~OPOST;
tty_driver->init_termios.c_lflag &= ~ICANON;
tty_driver->init_termios.c_cflag &= ~(CSIZE | CBAUD | CBAUDEX);
/* baud rate 9600 */
tty_termios_encode_baud_rate(&tty_driver->init_termios, 9600, 9600);
tty_driver->driver_state = pf;
tty_set_operations(tty_driver, &tty_gps_ops);
pf->gnss_tty_port = kzalloc(sizeof(*pf->gnss_tty_port), GFP_KERNEL);
if (!pf->gnss_tty_port) {
kfree(ttydrv_name);
tty_driver_kref_put(tty_driver);
return NULL;
}
pf->gnss_serial = NULL;
tty_port_init(pf->gnss_tty_port);
tty_port_link_device(pf->gnss_tty_port, tty_driver, 0);
err = tty_register_driver(tty_driver);
if (err) {
dev_err(dev, "Failed to register TTY driver err=%d\n", err);
tty_port_destroy(pf->gnss_tty_port);
kfree(pf->gnss_tty_port);
kfree(ttydrv_name);
tty_driver_kref_put(tty_driver);
return NULL;
}
dev_info(dev, "%s registered\n", ttydrv_name);
return tty_driver;
}
/**
* ice_gnss_init - Initialize GNSS TTY support
* @pf: Board private structure
*/
void ice_gnss_init(struct ice_pf *pf)
{
struct tty_driver *tty_driver;
tty_driver = ice_gnss_create_tty_driver(pf);
if (!tty_driver)
return;
pf->ice_gnss_tty_driver = tty_driver;
set_bit(ICE_FLAG_GNSS, pf->flags);
dev_info(ice_pf_to_dev(pf), "GNSS TTY init successful\n");
}
/**
* ice_gnss_exit - Disable GNSS TTY support
* @pf: Board private structure
*/
void ice_gnss_exit(struct ice_pf *pf)
{
if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver)
return;
if (pf->gnss_tty_port) {
tty_port_destroy(pf->gnss_tty_port);
kfree(pf->gnss_tty_port);
}
if (pf->gnss_serial) {
struct gnss_serial *gnss = pf->gnss_serial;
kthread_cancel_work_sync(&gnss->write_work);
kthread_cancel_delayed_work_sync(&gnss->read_work);
kfree(gnss);
pf->gnss_serial = NULL;
}
tty_unregister_driver(pf->ice_gnss_tty_driver);
kfree(pf->ice_gnss_tty_driver->name);
tty_driver_kref_put(pf->ice_gnss_tty_driver);
pf->ice_gnss_tty_driver = NULL;
}
/**
* ice_gnss_is_gps_present - Check if GPS HW is present
* @hw: pointer to HW struct
*/
bool ice_gnss_is_gps_present(struct ice_hw *hw)
{
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
if (!hw->func_caps.ts_func_info.src_tmr_owned)
return false;
if (ice_is_pca9575_present(hw)) {
int status;
u8 data;
status = ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN,
&data);
if (status || !!(data & ICE_E810T_P0_GNSS_PRSNT_N))
return false;
} else {
return false;
}
return true;
#else /* CONFIG_PTP_1588_CLOCK */
return false;
#endif /* CONFIG_PTP_1588_CLOCK */
}
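
The presence check above reads a PCA9575 input port and treats the GNSS present pin as active-low (hence the _N suffix), so a set bit means the module is absent. The decode in isolation, as a standalone C sketch with a placeholder bit value:

/* Standalone sketch of the active-low presence decode above:
 * the _N pin reads 0 when the GNSS module is present. The bit value
 * is a placeholder for ICE_E810T_P0_GNSS_PRSNT_N.
 */
#include <stdbool.h>
#include <stdint.h>

#define GNSS_PRSNT_N_BIT 0x04

static bool gnss_present(uint8_t port_in)
{
	return !(port_in & GNSS_PRSNT_N_BIT);
}
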

drivers/thirdparty/ice/ice_gnss.h (new file)

@@ -0,0 +1,62 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_GNSS_H_
#define _ICE_GNSS_H_
#include <linux/tty.h>
#include <linux/tty_flip.h>
#define ICE_E810T_GNSS_I2C_BUS 0x2
#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */
#define ICE_GNSS_TTY_WRITE_BUF 250
#define ICE_MAX_I2C_DATA_SIZE (ICE_AQC_I2C_DATA_SIZE_M >> \
ICE_AQC_I2C_DATA_SIZE_S)
#define ICE_MAX_I2C_WRITE_BYTES 4
/* u-blox specific definitions */
#define ICE_GNSS_UBX_I2C_BUS_ADDR 0x42
/* Data length register is big endian */
#define ICE_GNSS_UBX_DATA_LEN_H 0xFD
#define ICE_GNSS_UBX_DATA_LEN_WIDTH 2
#define ICE_GNSS_UBX_EMPTY_DATA 0xFF
/* For ublox writes are performed without address so the first byte to write is
* passed as I2C addr parameter.
*/
#define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1)
#define ICE_MAX_UBX_READ_TRIES 255
#define ICE_MAX_UBX_ACK_READ_TRIES 4095
struct gnss_write_buf {
struct list_head queue;
unsigned int size;
unsigned char *buf;
};
/**
* struct gnss_serial - data used to initialize GNSS TTY port
* @back: back pointer to PF
* @tty: pointer to the tty for this device
* @open_count: number of times this port has been opened
* @gnss_mutex: gnss_mutex used to protect GNSS serial operations
* @kworker: kwork thread for handling periodic work
* @read_work: read_work function for handling GNSS reads
* @write_work: write_work function for handling GNSS writes
* @queue: write buffers queue
*/
struct gnss_serial {
struct ice_pf *back;
struct tty_struct *tty;
int open_count;
struct mutex gnss_mutex; /* protects GNSS serial structure */
struct kthread_worker *kworker;
struct kthread_delayed_work read_work;
struct kthread_work write_work;
struct list_head queue;
};
void ice_gnss_init(struct ice_pf *pf);
void ice_gnss_exit(struct ice_pf *pf);
bool ice_gnss_is_gps_present(struct ice_hw *hw);
#endif /* _ICE_GNSS_H_ */

drivers/thirdparty/ice/ice_hw_autogen.h

@@ -1,12 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */

/* Machine generated file. Do not edit. */

#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_

#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
#define GL_HICR 0x00082040
#define GL_HICR_EN 0x00082044
#define GLGEN_CSR_DEBUG_C 0x00075750
#define GLNVM_GENS 0x000B6100
#define GLNVM_FLA 0x000B6108
#define GL_HIDA_MAX_INDEX 15
#define GL_HIBA_MAX_INDEX 1023
#define GL_RDPU_CNTRL 0x00052054 /* Reset Source: CORER */
#define GL_RDPU_CNTRL_RX_PAD_EN_S 0
#define GL_RDPU_CNTRL_RX_PAD_EN_M BIT(0)
@@ -449,8 +457,8 @@
#define PF0INT_OICR_CPM_PAGE_RSV3_M BIT(23)
#define PF0INT_OICR_CPM_PAGE_STORM_DETECT_S 24
#define PF0INT_OICR_CPM_PAGE_STORM_DETECT_M BIT(24)
#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_S 25
#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_M BIT(25)
#define PF0INT_OICR_CPM_PAGE_HMC_ERR_S 26
#define PF0INT_OICR_CPM_PAGE_HMC_ERR_M BIT(26)
#define PF0INT_OICR_CPM_PAGE_PE_PUSH_S 27
@@ -513,8 +521,8 @@
#define PF0INT_OICR_HLP_PAGE_RSV3_M BIT(23)
#define PF0INT_OICR_HLP_PAGE_STORM_DETECT_S 24
#define PF0INT_OICR_HLP_PAGE_STORM_DETECT_M BIT(24)
#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_S 25
#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_M BIT(25)
#define PF0INT_OICR_HLP_PAGE_HMC_ERR_S 26
#define PF0INT_OICR_HLP_PAGE_HMC_ERR_M BIT(26)
#define PF0INT_OICR_HLP_PAGE_PE_PUSH_S 27
@@ -562,8 +570,8 @@
#define PF0INT_OICR_PSM_PAGE_RSV3_M BIT(23)
#define PF0INT_OICR_PSM_PAGE_STORM_DETECT_S 24
#define PF0INT_OICR_PSM_PAGE_STORM_DETECT_M BIT(24)
#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_S 25
#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_M BIT(25)
#define PF0INT_OICR_PSM_PAGE_HMC_ERR_S 26
#define PF0INT_OICR_PSM_PAGE_HMC_ERR_M BIT(26)
#define PF0INT_OICR_PSM_PAGE_PE_PUSH_S 27
@@ -703,8 +711,8 @@
#define GL_ACL_PROFILE_BWSB_SEL_WSB_SRC_OFF_M ICE_M(0x1F, 8)
#define GL_ACL_PROFILE_DWSB_SEL(_i) (0x00391088 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
#define GL_ACL_PROFILE_DWSB_SEL_MAX_INDEX 15
#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_S 0
#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_M ICE_M(0xF, 0)
#define GL_ACL_PROFILE_PF_CFG(_i) (0x003910C8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GL_ACL_PROFILE_PF_CFG_MAX_INDEX 7
#define GL_ACL_PROFILE_PF_CFG_SCEN_SEL_S 0
@@ -862,8 +870,8 @@
#define GLLAN_TCLAN_CACHE_CTL_FETCH_CL_ALIGN_M BIT(6)
#define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_S 7
#define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_M ICE_M(0x7F, 7)
#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_S 14
#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_M ICE_M(0xFF, 14)
#define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_S 22
#define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_M ICE_M(0x3FF, 22)
#define GLTCLAN_CQ_CNTX0(_CQ) (0x000F0800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
@@ -2206,8 +2214,8 @@
#define PRTDCB_TX_DSCP2UP_CTL 0x00040980 /* Reset Source: CORER */
#define PRTDCB_TX_DSCP2UP_CTL_DSCP2UP_ENA_S 0
#define PRTDCB_TX_DSCP2UP_CTL_DSCP2UP_ENA_M BIT(0)
#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_S 1
#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_M ICE_M(0x7, 1)
#define PRTDCB_TX_DSCP2UP_IPV4_LUT(_i) (0x000409A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: CORER */
#define PRTDCB_TX_DSCP2UP_IPV4_LUT_MAX_INDEX 7
#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_0_S 0
@@ -2355,8 +2363,8 @@
#define TPB_PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_S 0
#define TPB_PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_M ICE_M(0x3FFFF, 0)
#define TPB_PRTTCB_LL_DWRR_WB_CREDITS 0x00099320 /* Reset Source: CORER */
#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_S 0
#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_M ICE_M(0x3FFFF, 0)
#define TPB_WB_RL_TC_CFG(_i) (0x00099360 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define TPB_WB_RL_TC_CFG_MAX_INDEX 31
#define TPB_WB_RL_TC_CFG_TOKENS_S 0
@@ -2421,8 +2429,8 @@
#define GL_ACLEXT_FORCE_L1CDID_MAX_INDEX 2
#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_S 0
#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_M ICE_M(0xF, 0)
#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
#define GL_ACLEXT_FORCE_PID(_i) (0x00210000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_ACLEXT_FORCE_PID_MAX_INDEX 2
#define GL_ACLEXT_FORCE_PID_STATIC_PID_S 0
@@ -2615,8 +2623,8 @@
#define GL_PREEXT_FORCE_L1CDID_MAX_INDEX 2
#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_S 0
#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_M ICE_M(0xF, 0)
#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
#define GL_PREEXT_FORCE_PID(_i) (0x0020F000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PREEXT_FORCE_PID_MAX_INDEX 2
#define GL_PREEXT_FORCE_PID_STATIC_PID_S 0
@@ -2817,8 +2825,8 @@
#define GL_PSTEXT_FORCE_L1CDID_MAX_INDEX 2
#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_S 0
#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_M ICE_M(0xF, 0)
#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
#define GL_PSTEXT_FORCE_PID(_i) (0x0020E000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PSTEXT_FORCE_PID_MAX_INDEX 2
#define GL_PSTEXT_FORCE_PID_STATIC_PID_S 0
@@ -2985,10 +2993,10 @@
#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_OFFSET_INDEX_M ICE_M(0x7, 4)
#define GLFLXP_RX_CMD_LX_PROT_IDX_PAYLOAD_OFFSET_INDEX_S 8
#define GLFLXP_RX_CMD_LX_PROT_IDX_PAYLOAD_OFFSET_INDEX_M ICE_M(0x7, 8)
#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_S 12
#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_M ICE_M(0x3, 12)
#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_S 14
#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_M ICE_M(0x3, 14)
#define GLFLXP_RX_CMD_PROTIDS(_i, _j) (0x0045A000 + ((_i) * 4 + (_j) * 1024)) /* _i=0...255, _j=0...5 */ /* Reset Source: CORER */
#define GLFLXP_RX_CMD_PROTIDS_MAX_INDEX 255
#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_S 0
@@ -3067,8 +3075,8 @@
#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_M ICE_M(0xFF, 0)
#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_S 8
#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_M ICE_M(0x1F, 8)
#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_S 16
#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_M ICE_M(0xFF, 16)
#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_1_S 24
#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_1_M ICE_M(0x1F, 24)
#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
@@ -3281,18 +3289,18 @@
#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDSTART_M ICE_M(0xF, 5)
#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_S 9
#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_M ICE_M(0x1F, 9)
#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_S 14
#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_M ICE_M(0x3, 14)
#define GLGEN_ANA_PROFIL_CTRL_DEF_PROF_ID_S 16
#define GLGEN_ANA_PROFIL_CTRL_DEF_PROF_ID_M ICE_M(0xF, 16)
#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_S 20
#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_M BIT(20)
#define GLGEN_ANA_TX_ABORT_PTYPE 0x0020D21C /* Reset Source: CORER */
#define GLGEN_ANA_TX_ABORT_PTYPE_ABORT_S 0
#define GLGEN_ANA_TX_ABORT_PTYPE_ABORT_M ICE_M(0x3FF, 0)
#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT 0x0020D208 /* Reset Source: CORER */
#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_S 0
#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_M ICE_M(0xFF, 0)
#define GLGEN_ANA_TX_CFG_CTRL 0x0020D104 /* Reset Source: CORER */
#define GLGEN_ANA_TX_CFG_CTRL_LINE_IDX_S 0
#define GLGEN_ANA_TX_CFG_CTRL_LINE_IDX_M ICE_M(0x3FFFF, 0)
@@ -3318,10 +3326,10 @@
#define GLGEN_ANA_TX_CFG_RDDATA_RD_DATA_S 0
#define GLGEN_ANA_TX_CFG_RDDATA_RD_DATA_M ICE_M(0xFFFFFFFF, 0)
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT 0x0020D15C /* Reset Source: CORER */
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_S 0
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_M BIT(0)
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_S 1
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_M ICE_M(0x7, 1)
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_ADDR_S 4
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_ADDR_M ICE_M(0x1FF, 4)
#define GLGEN_ANA_TX_CFG_WRDATA 0x0020D108 /* Reset Source: CORER */
@@ -3640,8 +3648,8 @@
#define GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_S 0
#define GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_M ICE_M(0xFFFFFFFF, 0)
#define GLHMC_FWSDDATAHIGH_FPMAT 0x00102078 /* Reset Source: CORER */
#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0
#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_M ICE_M(0xFFFFFFFF, 0)
#define GLHMC_FWSDDATALOW 0x00522074 /* Reset Source: CORER */
#define GLHMC_FWSDDATALOW_PMSDVALID_S 0
#define GLHMC_FWSDDATALOW_PMSDVALID_M BIT(0)
@@ -4039,8 +4047,8 @@
#define GLHMC_VFPEMRCNT_FPMPEMRSZ_M ICE_M(0x1FFFFFFF, 0)
#define GLHMC_VFPEOOISCBASE(_i) (0x0052E600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEOOISCBASE(_i) (0x0052E600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLHMC_VFPEOOISCBASE_MAX_INDEX 31 #define GLHMC_VFPEOOISCBASE_MAX_INDEX 31
#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_S 0 #define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_S 0
#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_M ICE_M(0xFFFFFFFF, 0) #define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_M ICE_M(0xFFFFFFFF, 0)
#define GLHMC_VFPEOOISCCNT(_i) (0x0052E700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEOOISCCNT(_i) (0x0052E700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLHMC_VFPEOOISCCNT_MAX_INDEX 31 #define GLHMC_VFPEOOISCCNT_MAX_INDEX 31
#define GLHMC_VFPEOOISCCNT_GLHMC_PEOOISCCNT_S 0 #define GLHMC_VFPEOOISCCNT_GLHMC_PEOOISCCNT_S 0
@ -4087,8 +4095,8 @@
#define GLHMC_VFPERRFCNT_GLHMC_PERRFCNT_M ICE_M(0xFFFFFFFF, 0) #define GLHMC_VFPERRFCNT_GLHMC_PERRFCNT_M ICE_M(0xFFFFFFFF, 0)
#define GLHMC_VFPERRFFLBASE(_i) (0x0052EA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPERRFFLBASE(_i) (0x0052EA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLHMC_VFPERRFFLBASE_MAX_INDEX 31 #define GLHMC_VFPERRFFLBASE_MAX_INDEX 31
#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_S 0 #define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_S 0
#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_M ICE_M(0xFFFFFFFF, 0) #define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_M ICE_M(0xFFFFFFFF, 0)
#define GLHMC_VFPETIMERBASE(_i) (0x0052DA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPETIMERBASE(_i) (0x0052DA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLHMC_VFPETIMERBASE_MAX_INDEX 31 #define GLHMC_VFPETIMERBASE_MAX_INDEX 31
#define GLHMC_VFPETIMERBASE_FPMPETIMERBASE_S 0 #define GLHMC_VFPETIMERBASE_FPMPETIMERBASE_S 0
@ -4115,8 +4123,8 @@
#define GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_M ICE_M(0xFFFFFFFF, 0) #define GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_M ICE_M(0xFFFFFFFF, 0)
#define GLHMC_VFSDDATAHIGH_FPMAT(_i) (0x00108200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFSDDATAHIGH_FPMAT(_i) (0x00108200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLHMC_VFSDDATAHIGH_FPMAT_MAX_INDEX 31 #define GLHMC_VFSDDATAHIGH_FPMAT_MAX_INDEX 31
#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0 #define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0
#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_M ICE_M(0xFFFFFFFF, 0) #define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_M ICE_M(0xFFFFFFFF, 0)
#define GLHMC_VFSDDATALOW(_i) (0x00528100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFSDDATALOW(_i) (0x00528100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLHMC_VFSDDATALOW_MAX_INDEX 31 #define GLHMC_VFSDDATALOW_MAX_INDEX 31
#define GLHMC_VFSDDATALOW_PMSDVALID_S 0 #define GLHMC_VFSDDATALOW_PMSDVALID_S 0
@ -4222,8 +4230,8 @@
#define PFHMC_ERRORINFO_FPMAT_PMF_ISVF_M BIT(7) #define PFHMC_ERRORINFO_FPMAT_PMF_ISVF_M BIT(7)
#define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_S 8 #define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_S 8
#define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_M ICE_M(0xF, 8) #define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_M ICE_M(0xF, 8)
#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_S 16 #define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_S 16
#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_M ICE_M(0x1F, 16) #define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_M ICE_M(0x1F, 16)
#define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_S 31 #define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_S 31
#define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_M BIT(31) #define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_M BIT(31)
#define PFHMC_PDINV 0x00520300 /* Reset Source: PFR */ #define PFHMC_PDINV 0x00520300 /* Reset Source: PFR */
@ -4310,8 +4318,8 @@
#define GL_MDCK_TDAT_TCLAN_TSO_SUM_BUFFS_LT_SUM_HDRS_M BIT(11) #define GL_MDCK_TDAT_TCLAN_TSO_SUM_BUFFS_LT_SUM_HDRS_M BIT(11)
#define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_S 12 #define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_S 12
#define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_M BIT(12) #define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_M BIT(12)
#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_S 13 #define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_S 13
#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_M BIT(13) #define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_M BIT(13)
#define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_S 14 #define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_S 14
#define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_M BIT(14) #define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_M BIT(14)
#define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_BYTES_EXCEED_PKTLEN_X_64_S 15 #define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_BYTES_EXCEED_PKTLEN_X_64_S 15
@ -5206,10 +5214,10 @@
#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E36C0 /* Reset Source: GLOBR */ #define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E36C0 /* Reset Source: GLOBR */
#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_S 0 #define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_S 0
#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_M BIT(0) #define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_M BIT(0)
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */ #define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_S 0 #define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_S 0
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_M ICE_M(0xFFFFFFFF, 0) #define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_M ICE_M(0xFFFFFFFF, 0)
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */ #define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_S 0 #define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_S 0
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_M ICE_M(0xFFFF, 0) #define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_M ICE_M(0xFFFF, 0)
#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E3180 /* Reset Source: GLOBR */ #define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E3180 /* Reset Source: GLOBR */
@ -5310,10 +5318,10 @@
#define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_EXCEED_M BIT(17) #define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_EXCEED_M BIT(17)
#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_S 18 #define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_S 18
#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_M BIT(18) #define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_M BIT(18)
#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_S 19 #define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_S 19
#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_M BIT(19) #define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_M BIT(19)
#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_S 20 #define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_S 20
#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_M BIT(20) #define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_M BIT(20)
#define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_S 21 #define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_S 21
#define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_M BIT(21) #define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_M BIT(21)
#define GL_MDCK_EN_TX_PQM_ILLEGAL_HEAD_DROP_DBL_S 22 #define GL_MDCK_EN_TX_PQM_ILLEGAL_HEAD_DROP_DBL_S 22
@ -5332,8 +5340,8 @@
#define GL_MDCK_TX_TDPU 0x00049348 /* Reset Source: CORER */ #define GL_MDCK_TX_TDPU 0x00049348 /* Reset Source: CORER */
#define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_S 0 #define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_S 0
#define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_M BIT(0) #define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_M BIT(0)
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_S 1 #define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_S 1
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1) #define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_S 2 #define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_S 2
#define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_M BIT(2) #define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_M BIT(2)
#define GL_MDCK_TX_TDPU_MAL_OFFSET_ITR_DIS_S 3 #define GL_MDCK_TX_TDPU_MAL_OFFSET_ITR_DIS_S 3
@ -5346,8 +5354,8 @@
#define GL_MDCK_TX_TDPU_L2_ACCEPT_FAIL_ITR_DIS_M BIT(6) #define GL_MDCK_TX_TDPU_L2_ACCEPT_FAIL_ITR_DIS_M BIT(6)
#define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_S 7 #define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_S 7
#define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_M BIT(7) #define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_M BIT(7)
#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_S 8 #define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_S 8
#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_M BIT(8) #define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_M BIT(8)
#define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_S 9 #define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_S 9
#define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_M BIT(9) #define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_M BIT(9)
#define GL_MDCK_TX_TDPU_NIC_IPSEC_ITR_DIS_S 10 #define GL_MDCK_TX_TDPU_NIC_IPSEC_ITR_DIS_S 10
@ -5429,8 +5437,8 @@
#define VP_MDET_TX_TDPU_VALID_M BIT(0) #define VP_MDET_TX_TDPU_VALID_M BIT(0)
#define GENERAL_MNG_FW_DBG_CSR(_i) (0x000B6180 + ((_i) * 4)) /* _i=0...9 */ /* Reset Source: POR */ #define GENERAL_MNG_FW_DBG_CSR(_i) (0x000B6180 + ((_i) * 4)) /* _i=0...9 */ /* Reset Source: POR */
#define GENERAL_MNG_FW_DBG_CSR_MAX_INDEX 9 #define GENERAL_MNG_FW_DBG_CSR_MAX_INDEX 9
#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_S 0 #define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_S 0
#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_M ICE_M(0xFFFFFFFF, 0) #define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_M ICE_M(0xFFFFFFFF, 0)
#define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */ #define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */
#define GL_FWRESETCNT_FWRESETCNT_S 0 #define GL_FWRESETCNT_FWRESETCNT_S 0
#define GL_FWRESETCNT_FWRESETCNT_M ICE_M(0xFFFFFFFF, 0) #define GL_FWRESETCNT_FWRESETCNT_M ICE_M(0xFFFFFFFF, 0)
@ -5842,8 +5850,8 @@
#define GL_XLR_MARKER_TRIG_RCU_PRS 0x002001C0 /* Reset Source: CORER */ #define GL_XLR_MARKER_TRIG_RCU_PRS 0x002001C0 /* Reset Source: CORER */
#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_S 0 #define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_S 0
#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_M ICE_M(0x3FF, 0) #define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_M ICE_M(0x3FF, 0)
#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_S 10 #define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_S 10
#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_M ICE_M(0x3, 10) #define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_M ICE_M(0x3, 10)
#define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_S 12 #define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_S 12
#define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_M ICE_M(0x7, 12) #define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_M ICE_M(0x7, 12)
#define GL_XLR_MARKER_TRIG_RCU_PRS_PORT_NUM_S 16 #define GL_XLR_MARKER_TRIG_RCU_PRS_PORT_NUM_S 16
@ -6722,11 +6730,11 @@
#define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_S 0 #define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_S 0
#define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_M ICE_M(0xFFFFFFFF, 0) #define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_M ICE_M(0xFFFFFFFF, 0)
#define GLPES_TCPRXFOURHOLEHI 0x0055E03C /* Reset Source: CORER */ #define GLPES_TCPRXFOURHOLEHI 0x0055E03C /* Reset Source: CORER */
#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_S 0 #define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_S 0
#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_M ICE_M(0xFFFFFF, 0) #define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_M ICE_M(0xFFFFFF, 0)
#define GLPES_TCPRXFOURHOLELO 0x0055E038 /* Reset Source: CORER */ #define GLPES_TCPRXFOURHOLELO 0x0055E038 /* Reset Source: CORER */
#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_S 0 #define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_S 0
#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_M ICE_M(0xFFFFFFFF, 0) #define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_M ICE_M(0xFFFFFFFF, 0)
#define GLPES_TCPRXONEHOLEHI 0x0055E024 /* Reset Source: CORER */ #define GLPES_TCPRXONEHOLEHI 0x0055E024 /* Reset Source: CORER */
#define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_S 0 #define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_S 0
#define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_M ICE_M(0xFFFFFF, 0) #define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_M ICE_M(0xFFFFFF, 0)
@ -8207,7 +8215,7 @@
#define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_S 0 #define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_S 0
#define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_M ICE_M(0xFFFFFFFF, 0) #define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_M ICE_M(0xFFFFFFFF, 0)
#define TPB_PRTTPB_STAT_TC_BYTES_SENT(_i) (0x00099094 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define TPB_PRTTPB_STAT_TC_BYTES_SENT(_i) (0x00099094 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
#define TPB_PRTTPB_STAT_TC_BYTES_SENT_MAX_INDEX 63 #define TPB_PRTTPB_STAT_TC_BYTES_SENT_MAX_INDEX 63
#define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_S 0 #define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_S 0
#define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_M ICE_M(0xFFFFFFFF, 0) #define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_M ICE_M(0xFFFFFFFF, 0)
#define EMP_SWT_PRUNIND 0x00204020 /* Reset Source: CORER */ #define EMP_SWT_PRUNIND 0x00204020 /* Reset Source: CORER */
@ -9449,5 +9457,5 @@
#define VFPE_WQEALLOC1_PEQPID_M ICE_M(0x3FFFF, 0) #define VFPE_WQEALLOC1_PEQPID_M ICE_M(0x3FFFF, 0)
#define VFPE_WQEALLOC1_WQE_DESC_INDEX_S 20 #define VFPE_WQEALLOC1_WQE_DESC_INDEX_S 20
#define VFPE_WQEALLOC1_WQE_DESC_INDEX_M ICE_M(0xFFF, 20) #define VFPE_WQEALLOC1_WQE_DESC_INDEX_M ICE_M(0xFFF, 20)
#endif /* !_ICE_HW_AUTOGEN_H_ */
#endif

File diff suppressed because it is too large


@ -1,422 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_IDC_H_
#define _ICE_IDC_H_
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/dcbnl.h>
#include <linux/ptp_clock_kernel.h>
/* This major and minor version represent IDC API version information.
 * During peer driver registration, the peer driver specifies its major
 * and minor version information (via peer_driver:ver_info). It is
 * checked against the following defines and, on mismatch, peer driver
 * registration fails and an appropriate message is logged.
 */
#define ICE_PEER_MAJOR_VER 7
#define ICE_PEER_MINOR_VER 1
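A minimal sketch of the gate this comment describes, assuming an exact-match policy for both fields (the real check lives in the ice registration path and may be more permissive):
static bool peer_ver_ok(struct ice_ver_info *ver)
{
	/* assumption: both major and minor must match exactly */
	return ver->major == ICE_PEER_MAJOR_VER &&
	       ver->minor == ICE_PEER_MINOR_VER;
}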
enum ice_peer_features {
ICE_PEER_FEATURE_ADK_SUPPORT,
ICE_PEER_FEATURE_PTP_SUPPORT,
ICE_PEER_FEATURE_SRIOV_SUPPORT,
ICE_PEER_FEATURE_PCIIOV_SUPPORT,
ICE_PEER_FEATURE_NBITS
};
#define ICE_ADK_SUP 0
#define ICE_PTP_SUP BIT(ICE_PEER_FEATURE_PTP_SUPPORT)
#define ICE_SRIOV_SUP BIT(ICE_PEER_FEATURE_SRIOV_SUPPORT)
#ifdef CONFIG_PCI_IOV
#define ICE_PCIIOV_SUP BIT(ICE_PEER_FEATURE_PCIIOV_SUPPORT)
#else
#define ICE_PCIIOV_SUP 0
#endif /* CONFIG_PCI_IOV */
#define ICE_IDC_FEATURES (ICE_ADK_SUP | ICE_PTP_SUP | ICE_SRIOV_SUP |\
ICE_PCIIOV_SUP)
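A peer's advertised support mask can then be screened against this composite; a hedged sketch (helper name illustrative only):
static bool peer_features_ok(u64 support)
{
	/* reject a peer advertising any bit this ice build did not set */
	return !(support & ~(u64)ICE_IDC_FEATURES);
}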
enum ice_event_type {
ICE_EVENT_LINK_CHANGE = 0x0,
ICE_EVENT_MTU_CHANGE,
ICE_EVENT_TC_CHANGE,
ICE_EVENT_API_CHANGE,
ICE_EVENT_MBX_CHANGE,
ICE_EVENT_CRIT_ERR,
ICE_EVENT_NBITS /* must be last */
};
enum ice_res_type {
ICE_INVAL_RES = 0x0,
ICE_VSI,
ICE_VEB,
ICE_EVENT_Q,
ICE_EGRESS_CMPL_Q,
ICE_CMPL_EVENT_Q,
ICE_ASYNC_EVENT_Q,
ICE_DOORBELL_Q,
ICE_RDMA_QSETS_TXSCHED,
};
enum ice_peer_reset_type {
ICE_PEER_PFR = 0,
ICE_PEER_CORER,
ICE_PEER_CORER_SW_CORE,
ICE_PEER_CORER_SW_FULL,
ICE_PEER_GLOBR,
};
/* reason notified to peer driver as part of event handling */
enum ice_close_reason {
ICE_REASON_INVAL = 0x0,
ICE_REASON_HW_UNRESPONSIVE,
ICE_REASON_INTERFACE_DOWN, /* Administrative down */
ICE_REASON_PEER_DRV_UNREG, /* peer driver getting unregistered */
ICE_REASON_PEER_OBJ_UNINIT,
ICE_REASON_GLOBR_REQ,
ICE_REASON_CORER_REQ,
ICE_REASON_EMPR_REQ,
ICE_REASON_PFR_REQ,
ICE_REASON_HW_RESET_PENDING,
ICE_REASON_RECOVERY_MODE,
ICE_REASON_PARAM_CHANGE,
};
enum ice_rdma_filter {
ICE_RDMA_FILTER_INVAL = 0x0,
ICE_RDMA_FILTER_IWARP,
ICE_RDMA_FILTER_ROCEV2,
ICE_RDMA_FILTER_BOTH,
};
/* This information is needed to handle peer driver registration.
 * Instead of adding more params to the peer driver registration
 * function, we pass it through the peer_drv object.
 */
struct ice_ver_info {
u16 major;
u16 minor;
u64 support;
};
/* Struct to hold per DCB APP info */
struct ice_dcb_app_info {
u8 priority;
u8 selector;
u16 prot_id;
};
struct ice_peer_obj;
struct ice_peer_obj_int;
#define ICE_IDC_MAX_USER_PRIORITY 8
#define ICE_IDC_MAX_APPS 64
#define ICE_IDC_DSCP_NUM_VAL 64
/* Source timer mode */
enum ice_src_tmr_mode {
ICE_SRC_TMR_MODE_NANOSECONDS,
ICE_SRC_TMR_MODE_LOCKED,
NUM_ICE_SRC_TMR_MODE
};
/* Struct to hold per RDMA Qset info */
struct ice_rdma_qset_params {
u32 teid; /* qset TEID */
u16 qs_handle; /* RDMA driver provides this */
u16 vsi_id; /* VSI index */
u8 tc; /* TC branch the QSet should belong to */
u8 reserved[3];
};
struct ice_res_base {
/* Union for future provision e.g. other res_type */
union {
struct ice_rdma_qset_params qsets;
} res;
};
struct ice_res {
/* Type of resource. Filled by peer driver */
enum ice_res_type res_type;
/* Count requested by peer driver */
u16 cnt_req;
/* Number of resources allocated. Filled in by the callee; based on
 * this value, the caller fills in "res" below.
 */
u16 res_allocated;
/* Unique handle to the allocated resources. Zero if the call fails.
 * Allocated by the callee and, for now, used by the caller for
 * internal tracking purposes.
 */
u32 res_handle;
/* The peer driver has to allocate sufficient memory to accommodate
 * cnt_req entries before calling this function.
 * The memory has to be zero initialized; it is an input/output param.
 * As a result of the alloc_res API, these structures will be populated.
*/
struct ice_res_base res[1];
};
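Per the comments above, the caller must size the allocation for cnt_req entries before calling alloc_res; a minimal peer-side sketch (helper name illustrative, error handling elided):
static struct ice_res *peer_alloc_res_req(u16 cnt)
{
	struct ice_res *r;

	/* zeroed, sized for 'cnt' entries; the [1] trailing array means
	 * struct_size() over-allocates by one entry, which is harmless
	 */
	r = kzalloc(struct_size(r, res, cnt), GFP_KERNEL);
	if (!r)
		return NULL;
	r->res_type = ICE_RDMA_QSETS_TXSCHED;
	r->cnt_req = cnt;
	return r;
}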
struct ice_qos_info {
u64 tc_ctx;
u8 rel_bw;
u8 prio_type;
u8 egress_virt_up;
u8 ingress_virt_up;
};
#define IDC_QOS_MODE_VLAN 0x0
#define IDC_QOS_MODE_DSCP 0x1
/* Struct to hold QoS info */
struct ice_qos_params {
struct ice_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
u8 up2tc[ICE_IDC_MAX_USER_PRIORITY];
u8 vsi_relative_bw;
u8 vsi_priority_type;
u32 num_apps;
u8 pfc_mode;
u8 dscp_map[ICE_IDC_DSCP_NUM_VAL];
struct ice_dcb_app_info apps[ICE_IDC_MAX_APPS];
u8 num_tc;
};
union ice_event_info {
/* ICE_EVENT_LINK_CHANGE */
struct {
struct net_device *lwr_nd;
u16 vsi_num; /* HW index of VSI corresponding to lwr ndev */
u8 new_link_state;
u8 lport;
} link_info;
/* ICE_EVENT_MTU_CHANGE */
u16 mtu;
/* ICE_EVENT_TC_CHANGE */
struct ice_qos_params port_qos;
/* ICE_EVENT_API_CHANGE */
u8 api_rdy;
/* ICE_EVENT_MBX_CHANGE */
u8 mbx_rdy;
/* ICE_EVENT_CRIT_ERR */
u32 reg;
};
/* ice_event elements are to be passed back and forth between the ice driver
* and the peer drivers. They are to be used to both register/unregister
* for event reporting and to report an event (events can be either ice
* generated or peer generated).
*
* For (un)registering for events, the structure needs to be populated with:
* reporter - pointer to the ice_peer_obj struct of the peer (un)registering
* type - bitmap with bits set for event types to (un)register for
*
* For reporting events, the structure needs to be populated with:
* reporter - pointer to peer that generated the event (NULL for ice)
* type - bitmap with single bit set for this event type
* info - union containing data relevant to this event type
*/
struct ice_event {
struct ice_peer_obj *reporter;
DECLARE_BITMAP(type, ICE_EVENT_NBITS);
union ice_event_info info;
};
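Following the registration recipe in the comment above, a peer asking for link-change notifications might look like this sketch (it assumes the ops pointer has already been wired up by the ice driver):
static void peer_reg_link_events(struct ice_peer_obj *peer_obj)
{
	struct ice_event event = {};

	/* reporter is the peer registering; one bit per wanted event */
	event.reporter = peer_obj;
	set_bit(ICE_EVENT_LINK_CHANGE, event.type);
	peer_obj->ops->reg_for_notification(peer_obj, &event);
}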
/* Following APIs are implemented by ICE driver and invoked by peer drivers */
struct ice_ops {
/* APIs to allocate resources such as VEB, VSI, Doorbell queues,
* completion queues, Tx/Rx queues, etc...
*/
int (*alloc_res)(struct ice_peer_obj *peer_obj,
struct ice_res *res,
int partial_acceptable);
int (*free_res)(struct ice_peer_obj *peer_obj,
struct ice_res *res);
int (*is_vsi_ready)(struct ice_peer_obj *peer_obj);
int (*peer_register)(struct ice_peer_obj *peer_obj);
int (*peer_unregister)(struct ice_peer_obj *peer_obj);
int (*request_reset)(struct ice_peer_obj *obj,
enum ice_peer_reset_type reset_type);
void (*notify_state_change)(struct ice_peer_obj *obj,
struct ice_event *event);
/* Notification APIs */
void (*reg_for_notification)(struct ice_peer_obj *obj,
struct ice_event *event);
void (*unreg_for_notification)(struct ice_peer_obj *obj,
struct ice_event *event);
int (*update_vsi_filter)(struct ice_peer_obj *peer_obj,
enum ice_rdma_filter filter, bool enable);
int (*vc_send)(struct ice_peer_obj *peer_obj, u32 vf_id, u8 *msg,
u16 len);
};
/* Following APIs are implemented by peer drivers and invoked by ICE driver */
struct ice_peer_ops {
void (*event_handler)(struct ice_peer_obj *peer_obj,
struct ice_event *event);
/* Why we have 'open' and when it is expected to be called:
* 1. symmetric set of API w.r.t close
* 2. To be invoked from the driver initialization path
* - call peer_driver:open once ice driver is fully initialized
* 3. To be invoked upon RESET complete
*
* Calls to open are performed from ice_finish_init_peer_obj
* which is invoked from the service task. This helps keep objects
* from having their open called until the ice driver is ready and
* has scheduled its service task.
*/
int (*open)(struct ice_peer_obj *peer_obj);
/* Peer's close function is to be called when the peer needs to be
* quiesced. This can be for a variety of reasons (enumerated in the
* ice_close_reason enum). A call to close will only be
* followed by a call to either remove or open. No IDC calls from the
* peer should be accepted until it is re-opened.
*
* The *reason* parameter gives the reason for the call to close; it
* can be any value enumerated in ice_close_reason. It exists primarily
* for the peer's bookkeeping, in case the peer wants to perform
* different tasks dictated by the reason.
*/
void (*close)(struct ice_peer_obj *peer_obj,
enum ice_close_reason reason);
int (*vc_receive)(struct ice_peer_obj *peer_obj, u32 vf_id, u8 *msg,
u16 len);
/* tell RDMA peer to prepare for TC change in a blocking call
* that will directly precede the change event
*/
void (*prep_tc_change)(struct ice_peer_obj *peer_obj);
};
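A skeleton of the peer side of this contract; the function and variable names are hypothetical, and only open/close are filled in:
static int my_rdma_open(struct ice_peer_obj *peer_obj)
{
	/* bring up peer resources; IDC calls are allowed from here on */
	return 0;
}

static void my_rdma_close(struct ice_peer_obj *peer_obj,
			  enum ice_close_reason reason)
{
	/* quiesce; make no IDC calls until re-opened */
}

static const struct ice_peer_ops my_rdma_peer_ops = {
	.open = my_rdma_open,
	.close = my_rdma_close,
};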
#define ICE_PEER_RDMA_NAME "ice_rdma"
#define ICE_PEER_RDMA_ID 0x00000010
#define ICE_MAX_NUM_PEERS 4
/* The const struct that instantiates peer_obj_id needs to be initialized
* in the .c with the macro ASSIGN_PEER_INFO.
* For example:
* static const struct peer_obj_id peer_obj_ids[] = ASSIGN_PEER_INFO;
*/
struct peer_obj_id {
char *name;
int id;
};
#define IDC_RDMA_INFO { .name = ICE_PEER_RDMA_NAME, .id = ICE_PEER_RDMA_ID },
#define IDC_AE_INFO
#define IDC_IPSEC_INFO
#define IDC_SWITCH_INFO
#define IDC_ADK_INFO
/* this is a list of all possible peers, some are unused but left for clarity */
#define ASSIGN_PEER_INFO \
{ \
IDC_RDMA_INFO \
IDC_AE_INFO \
IDC_IPSEC_INFO \
IDC_SWITCH_INFO \
IDC_ADK_INFO \
}
#define ice_peer_priv(x) ((x)->peer_priv)
/* structure representing peer_object */
struct ice_peer_obj {
struct ice_ver_info ver;
struct pci_dev *pdev; /* PCI device of corresponding to main function */
/* KVA / Linear address corresponding to BAR0 of underlying
* pci_device.
*/
u8 __iomem *hw_addr;
int peer_obj_id;
int index;
/* Opaque pointer for peer specific data tracking. This memory will
* be alloc'd and freed by the peer driver and used for private data
* accessible only to the specific peer. It is stored here so that
* when this struct is passed to the peer via an IDC call, the data
* can be accessed by the peer at that time.
* Peers should only retrieve the pointer via the macro:
* ice_peer_priv(struct ice_peer_obj *)
*/
void *peer_priv;
u8 ftype; /* PF(false) or VF (true) */
/* Data VSI created by driver */
u16 pf_vsi_num;
u8 lan_addr[ETH_ALEN]; /* default MAC address of main netdev */
u16 initial_mtu; /* Initial MTU of main netdev */
struct ice_qos_params initial_qos_info;
struct net_device *netdev;
/* PCI info */
u8 ari_ena;
u16 bus_num;
u16 dev_num;
u16 fn_num;
/* Based on the peer driver type, this points to the corresponding
 * MSI-X entries in pf->msix_entries (which were allocated as part of
 * driver initialization); e.g. for the RDMA driver, the number of
 * MSI-X entries reserved is num_online_cpus() + 1.
*/
u16 msix_count; /* How many vectors are reserved for this device */
struct msix_entry *msix_entries;
/* Following struct contains function pointers to be initialized
* by ICE driver and called by peer driver
*/
const struct ice_ops *ops;
/* Following struct contains function pointers to be initialized
* by peer driver and called by ICE driver
*/
const struct ice_peer_ops *peer_ops;
/* Pointer to peer_drv struct to be populated by peer driver */
struct ice_peer_drv *peer_drv;
};
struct ice_peer_obj_platform_data {
struct ice_peer_obj *peer_obj;
};
/* Structure representing a peer driver.
 * The peer driver initializes these fields, and they are consumed by
 * ICE as part of driver registration via the bus infrastructure.
 */
struct ice_peer_drv {
u16 driver_id;
#define ICE_PEER_LAN_DRIVER 0
#define ICE_PEER_RDMA_DRIVER 4
#define ICE_PEER_ADK_DRIVER 5
struct ice_ver_info ver;
const char *name;
};
#endif /* _ICE_IDC_H_*/


@ -4,167 +4,22 @@
#ifndef _ICE_IDC_INT_H_ #ifndef _ICE_IDC_INT_H_
#define _ICE_IDC_INT_H_ #define _ICE_IDC_INT_H_
#include "ice.h" #include "iidc.h"
#include "ice_idc.h"
#define ICE_MAX_NUM_AUX 4
enum ice_peer_obj_state { struct ice_pf;
ICE_PEER_OBJ_STATE_INIT, void ice_send_event_to_auxs(struct ice_pf *pf, struct iidc_event *event);
ICE_PEER_OBJ_STATE_PROBED, struct iidc_auxiliary_drv
ICE_PEER_OBJ_STATE_OPENING, *ice_get_auxiliary_drv(struct iidc_core_dev_info *cdev_info);
ICE_PEER_OBJ_STATE_OPENED, void ice_send_event_to_aux_no_lock(struct iidc_core_dev_info *cdev, void *data);
ICE_PEER_OBJ_STATE_PREP_RST,
ICE_PEER_OBJ_STATE_PREPPED,
ICE_PEER_OBJ_STATE_CLOSING,
ICE_PEER_OBJ_STATE_CLOSED,
ICE_PEER_OBJ_STATE_REMOVED,
ICE_PEER_OBJ_STATE_API_RDY,
ICE_PEER_OBJ_STATE_NBITS, /* must be last */
};
enum ice_peer_drv_state { void ice_cdev_info_update_vsi(struct iidc_core_dev_info *cdev_info,
ICE_PEER_DRV_STATE_MBX_RDY, struct ice_vsi *vsi);
ICE_PEER_DRV_STATE_NBITS, /* must be last */ int ice_unroll_cdev_info(struct iidc_core_dev_info *cdev_info, void *data);
}; struct iidc_core_dev_info
*ice_find_cdev_info_by_id(struct ice_pf *pf, int cdev_info_id);
struct ice_peer_drv_int { void ice_send_vf_reset_to_aux(struct iidc_core_dev_info *cdev_info, u16 vf_id);
struct ice_peer_drv *peer_drv; bool ice_is_rdma_aux_loaded(struct ice_pf *pf);
/* States associated with peer driver */
DECLARE_BITMAP(state, ICE_PEER_DRV_STATE_NBITS);
/* if this peer_obj is the originator of an event, these are the
* most recent events of each type
*/
struct ice_event current_events[ICE_EVENT_NBITS];
};
#define ICE_MAX_PEER_NAME 64
struct ice_peer_obj_int {
struct ice_peer_obj peer_obj;
struct ice_peer_drv_int *peer_drv_int; /* driver private structure */
char plat_name[ICE_MAX_PEER_NAME];
struct ice_peer_obj_platform_data plat_data;
/* if this peer_obj is the originator of an event, these are the
* most recent events of each type
*/
struct ice_event current_events[ICE_EVENT_NBITS];
/* Events a peer has registered to be notified about */
DECLARE_BITMAP(events, ICE_EVENT_NBITS);
/* States associated with peer_obj */
DECLARE_BITMAP(state, ICE_PEER_OBJ_STATE_NBITS);
struct mutex peer_obj_state_mutex; /* peer_obj state mutex */
/* per peer workqueue */
struct workqueue_struct *ice_peer_wq;
struct work_struct peer_prep_task;
struct work_struct peer_close_task;
enum ice_close_reason rst_type;
};
static inline struct
ice_peer_obj_int *peer_to_ice_obj_int(struct ice_peer_obj *peer_obj)
{
return peer_obj ? container_of(peer_obj, struct ice_peer_obj_int,
peer_obj) : NULL;
}
static inline struct
ice_peer_obj *ice_get_peer_obj(struct ice_peer_obj_int *peer_obj_int)
{
if (peer_obj_int)
return &peer_obj_int->peer_obj;
else
return NULL;
}
#if IS_ENABLED(CONFIG_MFD_CORE)
int ice_peer_update_vsi(struct ice_peer_obj_int *peer_obj_int, void *data);
int ice_close_peer_for_reset(struct ice_peer_obj_int *peer_obj_int, void *data);
int ice_unroll_peer(struct ice_peer_obj_int *peer_obj_int, void *data);
int ice_unreg_peer_obj(struct ice_peer_obj_int *peer_obj_int, void *data);
int ice_peer_close(struct ice_peer_obj_int *peer_obj_int, void *data);
int ice_peer_check_for_reg(struct ice_peer_obj_int *peer_obj_int, void *data);
int
ice_finish_init_peer_obj(struct ice_peer_obj_int *peer_obj_int, void *data);
static inline bool ice_validate_peer_obj(struct ice_peer_obj *peer_obj)
{
struct ice_peer_obj_int *peer_obj_int;
struct ice_pf *pf;
if (!peer_obj || !peer_obj->pdev)
return false;
if (!peer_obj->peer_ops)
return false;
pf = pci_get_drvdata(peer_obj->pdev);
if (!pf)
return false;
peer_obj_int = peer_to_ice_obj_int(peer_obj);
if (!peer_obj_int)
return false;
if (test_bit(ICE_PEER_OBJ_STATE_REMOVED, peer_obj_int->state) ||
test_bit(ICE_PEER_OBJ_STATE_INIT, peer_obj_int->state))
return false;
return true;
}
#else /* !CONFIG_MFD_CORE */
static inline int
ice_peer_update_vsi(struct ice_peer_obj_int *peer_obj_int, void *data)
{
return 0;
}
static inline int
ice_close_peer_for_reset(struct ice_peer_obj_int *peer_obj_int, void *data)
{
return 0;
}
static inline int
ice_unroll_peer(struct ice_peer_obj_int *peer_obj_int, void *data)
{
return 0;
}
static inline int
ice_unreg_peer_obj(struct ice_peer_obj_int *peer_obj_int, void *data)
{
return 0;
}
static inline int
ice_peer_close(struct ice_peer_obj_int *peer_obj_int, void *data)
{
return 0;
}
static inline int
ice_peer_check_for_reg(struct ice_peer_obj_int *peer_obj_int, void *data)
{
return 0;
}
static inline int
ice_finish_init_peer_obj(struct ice_peer_obj_int *peer_obj_int, void *data)
{
return 0;
}
static inline bool ice_validate_peer_obj(struct ice_peer_obj *peer)
{
return true;
}
#endif /* !CONFIG_MFD_CORE */
#endif /* !_ICE_IDC_INT_H_ */ #endif /* !_ICE_IDC_INT_H_ */

1032 drivers/thirdparty/ice/ice_ieps.c vendored Normal file

File diff suppressed because it is too large

14 drivers/thirdparty/ice/ice_ieps.h vendored Normal file

@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
/* Intel(R) Ethernet Connection E800 Series Linux Driver IEPS extensions */
#ifndef _ICE_IEPS_H_
#define _ICE_IEPS_H_
#include "ieps_peer.h"
#include "iidc.h"
int ice_ieps_entry(struct iidc_core_dev_info *obj, void *arg);
#endif /* _ICE_IEPS_H_ */

250 drivers/thirdparty/ice/ice_imem.c vendored Normal file

@ -0,0 +1,250 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice_common.h"
#include "ice_parser_util.h"
#define ICE_IMEM_TABLE_SIZE 192
static void _imem_bst_bm_dump(struct ice_hw *hw, struct ice_bst_main *bm)
{
dev_info(ice_hw_to_dev(hw), "boost main:\n");
dev_info(ice_hw_to_dev(hw), "\tal0 = %d\n", bm->al0);
dev_info(ice_hw_to_dev(hw), "\tal1 = %d\n", bm->al1);
dev_info(ice_hw_to_dev(hw), "\tal2 = %d\n", bm->al2);
dev_info(ice_hw_to_dev(hw), "\tpg = %d\n", bm->pg);
}
static void _imem_bst_kb_dump(struct ice_hw *hw, struct ice_bst_keybuilder *kb)
{
dev_info(ice_hw_to_dev(hw), "boost key builder:\n");
dev_info(ice_hw_to_dev(hw), "\tpriority = %d\n", kb->priority);
dev_info(ice_hw_to_dev(hw), "\ttsr_ctrl = %d\n", kb->tsr_ctrl);
}
static void _imem_np_kb_dump(struct ice_hw *hw, struct ice_np_keybuilder *kb)
{
dev_info(ice_hw_to_dev(hw), "next proto key builder:\n");
dev_info(ice_hw_to_dev(hw), "\tops = %d\n", kb->ops);
dev_info(ice_hw_to_dev(hw), "\tstart_or_reg0 = %d\n",
kb->start_or_reg0);
dev_info(ice_hw_to_dev(hw), "\tlen_or_reg1 = %d\n", kb->len_or_reg1);
}
static void _imem_pg_kb_dump(struct ice_hw *hw, struct ice_pg_keybuilder *kb)
{
dev_info(ice_hw_to_dev(hw), "parse graph key builder:\n");
dev_info(ice_hw_to_dev(hw), "\tflag0_ena = %d\n", kb->flag0_ena);
dev_info(ice_hw_to_dev(hw), "\tflag1_ena = %d\n", kb->flag1_ena);
dev_info(ice_hw_to_dev(hw), "\tflag2_ena = %d\n", kb->flag2_ena);
dev_info(ice_hw_to_dev(hw), "\tflag3_ena = %d\n", kb->flag3_ena);
dev_info(ice_hw_to_dev(hw), "\tflag0_idx = %d\n", kb->flag0_idx);
dev_info(ice_hw_to_dev(hw), "\tflag1_idx = %d\n", kb->flag1_idx);
dev_info(ice_hw_to_dev(hw), "\tflag2_idx = %d\n", kb->flag2_idx);
dev_info(ice_hw_to_dev(hw), "\tflag3_idx = %d\n", kb->flag3_idx);
dev_info(ice_hw_to_dev(hw), "\talu_reg_idx = %d\n", kb->alu_reg_idx);
}
static void _imem_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int index)
{
dev_info(ice_hw_to_dev(hw), "alu%d:\n", index);
dev_info(ice_hw_to_dev(hw), "\topc = %d\n", alu->opc);
dev_info(ice_hw_to_dev(hw), "\tsrc_start = %d\n", alu->src_start);
dev_info(ice_hw_to_dev(hw), "\tsrc_len = %d\n", alu->src_len);
dev_info(ice_hw_to_dev(hw), "\tshift_xlate_select = %d\n",
alu->shift_xlate_select);
dev_info(ice_hw_to_dev(hw), "\tshift_xlate_key = %d\n",
alu->shift_xlate_key);
dev_info(ice_hw_to_dev(hw), "\tsrc_reg_id = %d\n", alu->src_reg_id);
dev_info(ice_hw_to_dev(hw), "\tdst_reg_id = %d\n", alu->dst_reg_id);
dev_info(ice_hw_to_dev(hw), "\tinc0 = %d\n", alu->inc0);
dev_info(ice_hw_to_dev(hw), "\tinc1 = %d\n", alu->inc1);
dev_info(ice_hw_to_dev(hw), "\tproto_offset_opc = %d\n",
alu->proto_offset_opc);
dev_info(ice_hw_to_dev(hw), "\tproto_offset = %d\n",
alu->proto_offset);
dev_info(ice_hw_to_dev(hw), "\tbranch_addr = %d\n", alu->branch_addr);
dev_info(ice_hw_to_dev(hw), "\timm = %d\n", alu->imm);
dev_info(ice_hw_to_dev(hw), "\tdst_start = %d\n", alu->dst_start);
dev_info(ice_hw_to_dev(hw), "\tdst_len = %d\n", alu->dst_len);
dev_info(ice_hw_to_dev(hw), "\tflags_extr_imm = %d\n",
alu->flags_extr_imm);
dev_info(ice_hw_to_dev(hw), "\tflags_start_imm= %d\n",
alu->flags_start_imm);
}
/**
* ice_imem_dump - dump an imem item info
* @hw: pointer to the hardware structure
* @item: imem item to dump
*/
void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item)
{
dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
_imem_bst_bm_dump(hw, &item->b_m);
_imem_bst_kb_dump(hw, &item->b_kb);
dev_info(ice_hw_to_dev(hw), "pg priority = %d\n", item->pg);
_imem_np_kb_dump(hw, &item->np_kb);
_imem_pg_kb_dump(hw, &item->pg_kb);
_imem_alu_dump(hw, &item->alu0, 0);
_imem_alu_dump(hw, &item->alu1, 1);
_imem_alu_dump(hw, &item->alu2, 2);
}
/** The function parses a 4-bit Boost Main with the format below:
* BIT 0: ALU 0 (bm->alu0)
* BIT 1: ALU 1 (bm->alu1)
* BIT 2: ALU 2 (bm->alu2)
* BIT 3: Parse Graph (bm->pg)
*/
static void _imem_bm_init(struct ice_bst_main *bm, u8 data)
{
bm->al0 = (data & 0x1) != 0;
bm->al1 = (data & 0x2) != 0;
bm->al2 = (data & 0x4) != 0;
bm->pg = (data & 0x8) != 0;
}
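A worked value as a sanity check of the layout above (illustrative, not from the source):
/* _imem_bm_init(&bm, 0xB): 0xB is binary 1011, so
 *   bm.al0 = true   (bit 0)
 *   bm.al1 = true   (bit 1)
 *   bm.al2 = false  (bit 2)
 *   bm.pg  = true   (bit 3)
 */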
/** The function parses a 10-bit Boost Main Build with the format below:
* BIT 0-7: Priority (bkb->priority)
* BIT 8: TSR Control (bkb->tsr_ctrl)
* BIT 9: Reserved
*/
static void _imem_bkb_init(struct ice_bst_keybuilder *bkb, u16 data)
{
bkb->priority = (u8)(data & 0xff);
bkb->tsr_ctrl = (data & 0x100) != 0;
}
/** The function parses an 18-bit Next Protocol Key Build with the format below:
* BIT 0-1: Opcode kb->ops
* BIT 2-9: Start / Reg 0 (kb->start_or_reg0)
* BIT 10-17: Length / Reg 1 (kb->len_or_reg1)
*/
static void _imem_npkb_init(struct ice_np_keybuilder *kb, u32 data)
{
kb->ops = (u8)(data & 0x3);
kb->start_or_reg0 = (u8)((data >> 2) & 0xff);
kb->len_or_reg1 = (u8)((data >> 10) & 0xff);
}
/** The function parses a 35-bit Parse Graph Key Build with the format below:
* BIT 0: Flag 0 Enable (kb->flag0_ena)
* BIT 1-6: Flag 0 Index (kb->flag0_idx)
* BIT 7: Flag 1 Enable (kb->flag1_ena)
* BIT 8-13: Flag 1 Index (kb->flag1_idx)
* BIT 14: Flag 2 Enable (kb->flag2_ena)
* BIT 15-20: Flag 2 Index (kb->flag2_idx)
* BIT 21: Flag 3 Enable (kb->flag3_ena)
* BIT 22-27: Flag 3 Index (kb->flag3_idx)
* BIT 28-34: ALU Register Index (kb->alu_reg_idx)
*/
static void _imem_pgkb_init(struct ice_pg_keybuilder *kb, u64 data)
{
kb->flag0_ena = (data & 0x1) != 0;
kb->flag0_idx = (u8)((data >> 1) & 0x3f);
kb->flag1_ena = ((data >> 7) & 0x1) != 0;
kb->flag1_idx = (u8)((data >> 8) & 0x3f);
kb->flag2_ena = ((data >> 14) & 0x1) != 0;
kb->flag2_idx = (u8)((data >> 15) & 0x3f);
kb->flag3_ena = ((data >> 21) & 0x1) != 0;
kb->flag3_idx = (u8)((data >> 22) & 0x3f);
kb->alu_reg_idx = (u8)((data >> 28) & 0x7f);
}
/** The function parses a 96-bit ALU entry with the format below:
* BIT 0-5: Opcode (alu->opc)
* BIT 6-13: Source Start (alu->src_start)
* BIT 14-18: Source Length (alu->src_len)
* BIT 19: Shift/Xlate Select (alu->shift_xlate_select)
* BIT 20-23: Shift/Xlate Key (alu->shift_xlate_key)
* BIT 24-30: Source Register ID (alu->src_reg_id)
* BIT 31-37: Dest. Register ID (alu->dst_reg_id)
* BIT 38: Inc0 (alu->inc0)
* BIT 39: Inc1:(alu->inc1)
* BIT 40:41 Protocol Offset Opcode (alu->proto_offset_opc)
* BIT 42:49 Protocol Offset (alu->proto_offset)
* BIT 50:57 Branch Address (alu->branch_addr)
* BIT 58:73 Immediate (alu->imm)
* BIT 74 Dedicated Flags Enable (alu->dedicate_flags_ena)
* BIT 75:80 Dest. Start (alu->dst_start)
* BIT 81:86 Dest. Length (alu->dst_len)
* BIT 87 Flags Extract Imm. (alu->flags_extr_imm)
* BIT 88:95 Flags Start/Immediate (alu->flags_start_imm)
*
* NOTE: the first 5 bits are skipped as the start bit is not
* byte aligned.
*/
static void _imem_alu_init(struct ice_alu *alu, u8 *data)
{
u64 d64 = *(u64 *)data >> 5;
alu->opc = (enum ice_alu_opcode)(d64 & 0x3f);
alu->src_start = (u8)((d64 >> 6) & 0xff);
alu->src_len = (u8)((d64 >> 14) & 0x1f);
alu->shift_xlate_select = ((d64 >> 19) & 0x1) != 0;
alu->shift_xlate_key = (u8)((d64 >> 20) & 0xf);
alu->src_reg_id = (u8)((d64 >> 24) & 0x7f);
alu->dst_reg_id = (u8)((d64 >> 31) & 0x7f);
alu->inc0 = ((d64 >> 38) & 0x1) != 0;
alu->inc1 = ((d64 >> 39) & 0x1) != 0;
alu->proto_offset_opc = (u8)((d64 >> 40) & 0x3);
alu->proto_offset = (u8)((d64 >> 42) & 0xff);
alu->branch_addr = (u8)((d64 >> 50) & 0xff);
d64 = *(u64 *)(&data[7]) >> 7;
alu->imm = (u16)(d64 & 0xffff);
alu->dedicate_flags_ena = ((d64 >> 16) & 0x1) != 0;
alu->dst_start = (u8)((d64 >> 17) & 0x3f);
alu->dst_len = (u8)((d64 >> 23) & 0x3f);
alu->flags_extr_imm = ((d64 >> 29) & 0x1) != 0;
alu->flags_start_imm = (u8)((d64 >> 30) & 0xff);
}
/** The function parses a 384-bit IMEM entry with the format below:
* BIT 0-3: Boost Main (ii->b_m)
* BIT 4-13: Boost Key Build (ii->b_kb)
* BIT 14-15: PG Priority (ii->pg)
* BIT 16-33: Next Proto Key Build (ii->np_kb)
* BIT 34-68: PG Key Build (ii->pg_kb)
* BIT 69-164: ALU0 (ii->alu0)
* BIT 165-260:ALU1 (ii->alu1)
* BIT 261-356:ALU2 (ii->alu2)
* BIT 357-383:Reserved
*/
static void _imem_parse_item(struct ice_hw *hw, u16 idx, void *item,
void *data, int size)
{
struct ice_imem_item *ii = (struct ice_imem_item *)item;
u8 *buf = (u8 *)data;
ii->idx = idx;
_imem_bm_init(&ii->b_m, buf[0]);
_imem_bkb_init(&ii->b_kb, *((u16 *)(&buf[0])) >> 4);
ii->pg = (u8)((buf[1] & 0xc0) >> 6);
_imem_npkb_init(&ii->np_kb, *((u32 *)(&buf[2])));
_imem_pgkb_init(&ii->pg_kb, *((u64 *)(&buf[2])) >> 18);
_imem_alu_init(&ii->alu0, &buf[8]);
_imem_alu_init(&ii->alu1, &buf[20]);
_imem_alu_init(&ii->alu2, &buf[32]);
if (hw->debug_mask & ICE_DBG_PARSER)
ice_imem_dump(hw, ii);
}
/**
* ice_imem_table_get - create an imem table
* @hw: pointer to the hardware structure
*/
struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw)
{
return (struct ice_imem_item *)
ice_parser_create_table(hw, ICE_SID_RXPARSER_IMEM,
sizeof(struct ice_imem_item),
ICE_IMEM_TABLE_SIZE,
ice_parser_sect_item_get,
_imem_parse_item, false);
}
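A hedged sketch of consuming this table; it assumes the table returned by ice_parser_create_table() is kmalloc-backed and owned by the caller, which this diff does not show:
static void imem_dump_all(struct ice_hw *hw)
{
	struct ice_imem_item *table = ice_imem_table_get(hw);
	int i;

	if (!table)
		return;
	for (i = 0; i < 192; i++)	/* ICE_IMEM_TABLE_SIZE */
		ice_imem_dump(hw, &table[i]);
	kfree(table);			/* assumption: kmalloc-backed */
}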

108 drivers/thirdparty/ice/ice_imem.h vendored Normal file

@ -0,0 +1,108 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_IMEM_H_
#define _ICE_IMEM_H_
struct ice_bst_main {
bool al0;
bool al1;
bool al2;
bool pg;
};
struct ice_bst_keybuilder {
u8 priority;
bool tsr_ctrl;
};
struct ice_np_keybuilder {
u8 ops;
u8 start_or_reg0;
u8 len_or_reg1;
};
struct ice_pg_keybuilder {
bool flag0_ena;
bool flag1_ena;
bool flag2_ena;
bool flag3_ena;
u8 flag0_idx;
u8 flag1_idx;
u8 flag2_idx;
u8 flag3_idx;
u8 alu_reg_idx;
};
enum ice_alu_opcode {
ICE_ALU_PARK = 0,
ICE_ALU_MOV_ADD = 1,
ICE_ALU_ADD = 2,
ICE_ALU_MOV_AND = 4,
ICE_ALU_AND = 5,
ICE_ALU_AND_IMM = 6,
ICE_ALU_MOV_OR = 7,
ICE_ALU_OR = 8,
ICE_ALU_MOV_XOR = 9,
ICE_ALU_XOR = 10,
ICE_ALU_NOP = 11,
ICE_ALU_BR = 12,
ICE_ALU_BREQ = 13,
ICE_ALU_BRNEQ = 14,
ICE_ALU_BRGT = 15,
ICE_ALU_BRLT = 16,
ICE_ALU_BRGEQ = 17,
ICE_ALU_BRLEG = 18,
ICE_ALU_SETEQ = 19,
ICE_ALU_ANDEQ = 20,
ICE_ALU_OREQ = 21,
ICE_ALU_SETNEQ = 22,
ICE_ALU_ANDNEQ = 23,
ICE_ALU_ORNEQ = 24,
ICE_ALU_SETGT = 25,
ICE_ALU_ANDGT = 26,
ICE_ALU_ORGT = 27,
ICE_ALU_SETLT = 28,
ICE_ALU_ANDLT = 29,
ICE_ALU_ORLT = 30,
ICE_ALU_MOV_SUB = 31,
ICE_ALU_SUB = 32,
ICE_ALU_INVALID = 64,
};
struct ice_alu {
enum ice_alu_opcode opc;
u8 src_start;
u8 src_len;
bool shift_xlate_select;
u8 shift_xlate_key;
u8 src_reg_id;
u8 dst_reg_id;
bool inc0;
bool inc1;
u8 proto_offset_opc;
u8 proto_offset;
u8 branch_addr;
u16 imm;
bool dedicate_flags_ena;
u8 dst_start;
u8 dst_len;
bool flags_extr_imm;
u8 flags_start_imm;
};
struct ice_imem_item {
u16 idx;
struct ice_bst_main b_m;
struct ice_bst_keybuilder b_kb;
u8 pg;
struct ice_np_keybuilder np_kb;
struct ice_pg_keybuilder pg_kb;
struct ice_alu alu0;
struct ice_alu alu1;
struct ice_alu alu2;
};
void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item);
struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw);
#endif /* _ICE_IMEM_H_ */

377 drivers/thirdparty/ice/ice_irq.c vendored Normal file

@ -0,0 +1,377 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice.h"
#include "ice_lib.h"
#include "ice_irq.h"
#ifdef HAVE_PCI_ALLOC_IRQ
static int ice_alloc_and_fill_msix_entries(struct ice_pf *pf, int nvec)
{
int i;
pf->msix_entries = kcalloc(nvec, sizeof(*pf->msix_entries),
GFP_KERNEL);
if (!pf->msix_entries)
return -ENOMEM;
for (i = 0; i < nvec; i++) {
pf->msix_entries[i].entry = i;
pf->msix_entries[i].vector = ice_get_irq_num(pf, i);
}
return 0;
}
#endif /* HAVE_PCI_ALLOC_IRQ */
#ifndef HAVE_PCI_ALLOC_IRQ
static int ice_alloc_msix_entries(struct ice_pf *pf, u16 num_entries)
{
u16 i;
pf->msix_entries = devm_kcalloc(ice_pf_to_dev(pf), num_entries,
sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries)
return -ENOMEM;
for (i = 0; i < num_entries; i++)
pf->msix_entries[i].entry = i;
return 0;
}
static void ice_free_msix_entries(struct ice_pf *pf)
{
devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
pf->msix_entries = NULL;
}
#endif /* HAVE_PCI_ALLOC_IRQ */
static void ice_dis_msix(struct ice_pf *pf)
{
#ifdef HAVE_PCI_ALLOC_IRQ
pci_free_irq_vectors(pf->pdev);
#else
ice_free_msix_entries(pf);
pci_disable_msix(pf->pdev);
#endif /* HAVE_PCI_ALLOC_IRQ */
}
static int ice_ena_msix(struct ice_pf *pf, int nvec)
{
#ifdef HAVE_PCI_ALLOC_IRQ
return pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, nvec,
PCI_IRQ_MSIX);
#else
int vectors;
int err;
err = ice_alloc_msix_entries(pf, nvec);
if (err)
return err;
vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
ICE_MIN_MSIX, nvec);
if (vectors < 0)
ice_free_msix_entries(pf);
return vectors;
#endif /* HAVE_PCI_ALLOC_IRQ */
}
static void ice_adj_vec_clear(int *src, int size)
{
int i;
for (i = 0; i < size; i++)
src[i] = 0;
}
static void ice_adj_vec_sum(int *dst, int *src, int size)
{
int i;
for (i = 0; i < size; i++)
dst[i] += src[i];
}
/*
* Allow 256 queue pairs for ADQ only if the PF has at least
* 1024 msix vectors (1 or 2 port NIC).
*/
static int ice_adq_max_qps(struct ice_pf *pf)
{
if (pf->hw.func_caps.common_cap.num_msix_vectors >= 1024)
return ICE_ADQ_MAX_QPS;
return num_online_cpus();
}
/**
* ice_ena_msix_range - request a range of MSI-X vectors from the OS
* @pf: board private structure
*
* The driver tries to enable the best-case-scenario number of MSI-X vectors.
* If that doesn't succeed, it adjusts to the number of IRQs returned by the
* kernel.
*
* The fall-back logic is described below, with each [#] representing the
* number of IRQs needed for that step. The highest step whose requirement is
* covered by the received number of IRQs is chosen; if even the bare-minimum
* step [7] needs more IRQs than were received, an error is returned.
*
* Step [0]: Enable the best-case scenario MSI-X vectors.
*
* Step [1]: Enable MSI-X vectors with eswitch support disabled
*
* Step [2]: Enable MSI-X vectors with the number of vectors reserved for
* MACVLAN and Scalable IOV support reduced by a factor of 2.
*
* Step [3]: Enable MSI-X vectors with the number of vectors reserved for
* MACVLAN and Scalable IOV support reduced by a factor of 4.
*
* Step [4]: Enable MSI-X vectors with MACVLAN and Scalable IOV support
* disabled.
*
* Step [5]: Enable MSI-X vectors with the number of pf->num_lan_msix reduced
* by a factor of 2 from the previous step (i.e. num_online_cpus() / 2).
* Also, with the number of pf->num_rdma_msix reduced by a factor of ~2 from the
* previous step (i.e. num_online_cpus() / 2 + ICE_RDMA_NUM_AEQ_MSIX).
*
* Step [6]: Same as step [5], except reduce both by a factor of 4
* (i.e. num_online_cpus() / 4).
*
* Step [7]: Enable the bare-minimum MSI-X vectors.
*
* Each feature has a separate table with the IRQs needed at each step. The
* sum of these tables is tracked in adj_vec to show the total IRQs needed at
* each step. The separate tables are later used to set the correct number of
* IRQs for each feature based on the chosen step.
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
#define ICE_ADJ_VEC_STEPS 8
#define ICE_ADJ_VEC_WORST_CASE 0
#define ICE_ADJ_VEC_BEST_CASE (ICE_ADJ_VEC_STEPS - 1)
int num_cpus = num_online_cpus();
int rdma_adj_vec[ICE_ADJ_VEC_STEPS] = {
ICE_MIN_RDMA_MSIX,
num_cpus / 4 > ICE_MIN_RDMA_MSIX ?
num_cpus / 4 + ICE_RDMA_NUM_AEQ_MSIX :
ICE_MIN_RDMA_MSIX,
num_cpus / 2 > ICE_MIN_RDMA_MSIX ?
num_cpus / 2 + ICE_RDMA_NUM_AEQ_MSIX :
ICE_MIN_RDMA_MSIX,
num_cpus > ICE_MIN_RDMA_MSIX ?
num_cpus + ICE_RDMA_NUM_AEQ_MSIX : ICE_MIN_RDMA_MSIX,
num_cpus > ICE_MIN_RDMA_MSIX ?
num_cpus + ICE_RDMA_NUM_AEQ_MSIX : ICE_MIN_RDMA_MSIX,
num_cpus > ICE_MIN_RDMA_MSIX ?
num_cpus + ICE_RDMA_NUM_AEQ_MSIX : ICE_MIN_RDMA_MSIX,
num_cpus > ICE_MIN_RDMA_MSIX ?
num_cpus + ICE_RDMA_NUM_AEQ_MSIX : ICE_MIN_RDMA_MSIX,
num_cpus > ICE_MIN_RDMA_MSIX ?
num_cpus + ICE_RDMA_NUM_AEQ_MSIX : ICE_MIN_RDMA_MSIX,
};
int lan_adj_vec[ICE_ADJ_VEC_STEPS] = {
ICE_MIN_LAN_MSIX,
max_t(int, num_cpus / 4, ICE_MIN_LAN_MSIX),
max_t(int, num_cpus / 2, ICE_MIN_LAN_MSIX),
max_t(int, num_cpus, ICE_MIN_LAN_MSIX),
max_t(int, num_cpus, ICE_MIN_LAN_MSIX),
max_t(int, num_cpus, ICE_MIN_LAN_MSIX),
max_t(int, num_cpus, ICE_MIN_LAN_MSIX),
max_t(int, ice_adq_max_qps(pf), ICE_MIN_LAN_MSIX),
};
int fdir_adj_vec[ICE_ADJ_VEC_STEPS] = {
ICE_FDIR_MSIX, ICE_FDIR_MSIX, ICE_FDIR_MSIX,
ICE_FDIR_MSIX, ICE_FDIR_MSIX, ICE_FDIR_MSIX,
ICE_FDIR_MSIX, ICE_FDIR_MSIX,
};
int adj_vec[ICE_ADJ_VEC_STEPS] = {
ICE_OICR_MSIX, ICE_OICR_MSIX, ICE_OICR_MSIX,
ICE_OICR_MSIX, ICE_OICR_MSIX, ICE_OICR_MSIX,
ICE_OICR_MSIX, ICE_OICR_MSIX,
};
#ifdef HAVE_NDO_DFWD_OPS
int macvlan_adj_vec[ICE_ADJ_VEC_STEPS] = {
0, 0, 0, 0,
(ICE_MAX_MACVLANS * ICE_DFLT_VEC_VMDQ_VSI) / 4,
(ICE_MAX_MACVLANS * ICE_DFLT_VEC_VMDQ_VSI) / 2,
ICE_MAX_MACVLANS * ICE_DFLT_VEC_VMDQ_VSI,
ICE_MAX_MACVLANS * ICE_DFLT_VEC_VMDQ_VSI,
};
#endif /* HAVE_NDO_DFWD_OPS */
int eswitch_adj_vec[ICE_ADJ_VEC_STEPS] = {
0, 0, 0, 0, 0, 0, 0,
ICE_ESWITCH_MSIX,
};
int scalable_adj_vec[ICE_ADJ_VEC_STEPS] = {
0, 0, 0, 0,
(ICE_MAX_SCALABLE * ICE_NUM_VF_MSIX_SMALL) / 4,
(ICE_MAX_SCALABLE * ICE_NUM_VF_MSIX_SMALL) / 2,
ICE_MAX_SCALABLE * ICE_NUM_VF_MSIX_SMALL,
ICE_MAX_SCALABLE * ICE_NUM_VF_MSIX_SMALL,
};
struct device *dev = ice_pf_to_dev(pf);
int adj_step = ICE_ADJ_VEC_BEST_CASE;
int err = -ENOSPC;
int v_actual, i;
int needed = 0;
needed += ICE_OICR_MSIX;
needed += lan_adj_vec[ICE_ADJ_VEC_BEST_CASE];
ice_adj_vec_sum(adj_vec, lan_adj_vec, ICE_ADJ_VEC_STEPS);
if (test_bit(ICE_FLAG_ESWITCH_CAPABLE, pf->flags)) {
needed += eswitch_adj_vec[ICE_ADJ_VEC_BEST_CASE];
ice_adj_vec_sum(adj_vec, eswitch_adj_vec, ICE_ADJ_VEC_STEPS);
} else {
ice_adj_vec_clear(eswitch_adj_vec, ICE_ADJ_VEC_STEPS);
}
#ifdef HAVE_NDO_DFWD_OPS
if (test_bit(ICE_FLAG_VMDQ_ENA, pf->flags)) {
needed += macvlan_adj_vec[ICE_ADJ_VEC_BEST_CASE];
ice_adj_vec_sum(adj_vec, macvlan_adj_vec, ICE_ADJ_VEC_STEPS);
} else {
ice_adj_vec_clear(macvlan_adj_vec, ICE_ADJ_VEC_STEPS);
}
#endif /* HAVE_NDO_DFWD_OPS */
if (ice_chk_rdma_cap(pf)) {
needed += rdma_adj_vec[ICE_ADJ_VEC_BEST_CASE];
ice_adj_vec_sum(adj_vec, rdma_adj_vec, ICE_ADJ_VEC_STEPS);
} else {
ice_adj_vec_clear(rdma_adj_vec, ICE_ADJ_VEC_STEPS);
}
if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
needed += fdir_adj_vec[ICE_ADJ_VEC_BEST_CASE];
ice_adj_vec_sum(adj_vec, fdir_adj_vec, ICE_ADJ_VEC_STEPS);
} else {
ice_adj_vec_clear(fdir_adj_vec, ICE_ADJ_VEC_STEPS);
}
if (test_bit(ICE_FLAG_SIOV_CAPABLE, pf->flags)) {
needed += scalable_adj_vec[ICE_ADJ_VEC_BEST_CASE];
ice_adj_vec_sum(adj_vec, scalable_adj_vec, ICE_ADJ_VEC_STEPS);
} else {
ice_adj_vec_clear(scalable_adj_vec, ICE_ADJ_VEC_STEPS);
}
v_actual = ice_ena_msix(pf, needed);
if (v_actual < 0) {
err = v_actual;
goto err;
} else if (v_actual < adj_vec[ICE_ADJ_VEC_WORST_CASE]) {
ice_dis_msix(pf);
goto err;
}
for (i = ICE_ADJ_VEC_WORST_CASE + 1; i < ICE_ADJ_VEC_STEPS; i++) {
if (v_actual < adj_vec[i]) {
adj_step = i - 1;
break;
}
}
pf->num_lan_msix = lan_adj_vec[adj_step];
pf->num_rdma_msix = rdma_adj_vec[adj_step];
if (test_bit(ICE_FLAG_ESWITCH_CAPABLE, pf->flags) &&
!eswitch_adj_vec[adj_step]) {
dev_warn(dev, "Not enough MSI-X for eswitch support, disabling feature\n");
clear_bit(ICE_FLAG_ESWITCH_CAPABLE, pf->flags);
}
#ifdef HAVE_NDO_DFWD_OPS
if (test_bit(ICE_FLAG_VMDQ_ENA, pf->flags) &&
!macvlan_adj_vec[adj_step]) {
dev_warn(dev, "Not enough MSI-X for hardware MACVLAN support, disabling feature\n");
clear_bit(ICE_FLAG_VMDQ_ENA, pf->flags);
}
#endif /* HAVE_NDO_DFWD_OPS */
pf->max_adq_qps = lan_adj_vec[adj_step];
if (test_bit(ICE_FLAG_SIOV_CAPABLE, pf->flags) &&
!scalable_adj_vec[adj_step]) {
dev_warn(dev, "Not enough MSI-X for Scalable IOV support, disabling feature\n");
clear_bit(ICE_FLAG_SIOV_CAPABLE, pf->flags);
}
return v_actual;
err:
dev_err(dev, "Failed to enable MSI-X vectors\n");
return err;
}
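To make the downgrade concrete, here is a minimal standalone sketch of the step-selection loop above; the table contents and granted-vector count used in the example are hypothetical:
/* Illustration only: pick the highest step whose cumulative need is
 * covered by what the OS actually granted, exactly as the loop above.
 */
static int pick_adj_step(const int *adj_vec, int steps, int v_actual)
{
	int step = steps - 1;	/* assume best case */
	int i;

	for (i = 1; i < steps; i++) {
		if (v_actual < adj_vec[i]) {
			step = i - 1;	/* last fully-funded step */
			break;
		}
	}
	return step;
}
E.g. with adj_vec = {8, 16, 32, 64, 80, 96, 112, 128} and v_actual = 40, the loop stops at i = 3 and the function settles on step 2.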
/**
* ice_init_interrupt_scheme - Determine proper interrupt scheme
* @pf: board private structure to initialize
*/
int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int vectors = ice_ena_msix_range(pf);
if (vectors < 0)
return vectors;
/* pf->msix_entries is used in IDC and needs to be filled on kernels
 * with the new IRQ alloc API
*/
#ifdef HAVE_PCI_ALLOC_IRQ
if (ice_alloc_and_fill_msix_entries(pf, vectors)) {
ice_dis_msix(pf);
return -ENOMEM;
}
#endif /* HAVE_PCI_ALLOC_IRQ */
/* set up vector assignment tracking */
pf->irq_tracker =
devm_kzalloc(ice_pf_to_dev(pf),
struct_size(pf->irq_tracker, list, vectors),
GFP_KERNEL);
if (!pf->irq_tracker) {
ice_dis_msix(pf);
return -ENOMEM;
}
/* populate SW interrupts pool with number of OS granted IRQs. */
pf->num_avail_sw_msix = (u16)vectors;
pf->irq_tracker->num_entries = (u16)vectors;
pf->irq_tracker->end = pf->irq_tracker->num_entries;
return 0;
}
/**
* ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
* @pf: board private structure
*/
void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
#ifdef HAVE_PCI_ALLOC_IRQ
kfree(pf->msix_entries);
pf->msix_entries = NULL;
#endif /* HAVE_PCI_ALLOC_IRQ */
ice_dis_msix(pf);
if (pf->irq_tracker) {
devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
pf->irq_tracker = NULL;
}
}
/**
* ice_get_irq_num - get system irq number based on index from driver
* @pf: board private structure
* @idx: driver irq index
*/
int ice_get_irq_num(struct ice_pf *pf, int idx)
{
#ifdef HAVE_PCI_ALLOC_IRQ
return pci_irq_vector(pf->pdev, idx);
#else
if (!pf->msix_entries)
return -EINVAL;
return pf->msix_entries[idx].vector;
#endif /* HAVE_PCI_ALLOC_IRQ */
}
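Typical consumption of this helper, as a hedged sketch (the wrapper and interrupt name are hypothetical):
static int peer_request_vector(struct ice_pf *pf, int idx,
			       irq_handler_t handler, void *data)
{
	int irq = ice_get_irq_num(pf, idx);

	if (irq < 0)
		return irq;	/* index not backed by a vector */
	return request_irq(irq, handler, 0, "ice-example", data);
}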

12 drivers/thirdparty/ice/ice_irq.h vendored Normal file

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_IRQ_H_
#define _ICE_IRQ_H_
int ice_init_interrupt_scheme(struct ice_pf *pf);
void ice_clear_interrupt_scheme(struct ice_pf *pf);
int ice_get_irq_num(struct ice_pf *pf, int idx);
#endif

File diff suppressed because it is too large


@ -6,6 +6,10 @@
#ifdef HAVE_NETDEV_UPPER_INFO #ifdef HAVE_NETDEV_UPPER_INFO
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include "ice.h"
#define ICE_LAG_INVALID_PORT 0xFF
#define ICE_LAG_SINGLE_FILTER_SIZE 0xC
/* LAG roles for netdev */ /* LAG roles for netdev */
enum ice_lag_role { enum ice_lag_role {
@ -17,24 +21,101 @@ enum ice_lag_role {
struct ice_pf; struct ice_pf;
struct ice_lag_netdev_list {
struct list_head node;
struct net_device *netdev;
};
/* LAG info struct */ /* LAG info struct */
struct ice_lag { struct ice_lag {
struct ice_pf *pf; /* backlink to PF struct */ struct ice_pf *pf; /* backlink to PF struct */
struct iidc_rdma_qset_params rdma_qset[IEEE_8021QAZ_MAX_TCS];
struct ice_vsi *rdma_vsi;
struct net_device *netdev; /* this PF's netdev */ struct net_device *netdev; /* this PF's netdev */
struct net_device *peer_netdev;
struct net_device *upper_netdev; /* upper bonding netdev */ struct net_device *upper_netdev; /* upper bonding netdev */
struct list_head *netdev_head;
struct notifier_block notif_block; struct notifier_block notif_block;
int bond_id; /* identify which bond we are in */
s32 bond_mode;
u8 bonded:1; /* currently bonded */ u8 bonded:1; /* currently bonded */
u8 master:1; /* this is a master */ u8 primary:1; /* this is primary */
u8 handler:1; /* did we register a rx_netdev_handler */ u8 handler:1; /* did we register a rx_netdev_handler */
/* each thing blocking bonding will increment this value by one. /* each thing blocking bonding will increment this value by one.
* If this value is zero, then bonding is allowed. * If this value is zero, then bonding is allowed.
*/ */
u16 dis_lag; u16 dis_lag;
u8 role; u8 role;
struct ice_rule_query_data fltr;
u16 action_idx;
};
/* LAG workqueue struct */
struct ice_lag_work {
struct work_struct lag_task;
struct ice_lag_netdev_list netdev_list;
struct ice_lag *lag;
unsigned long event;
struct net_device *event_netdev;
union {
struct netdev_notifier_changeupper_info changeupper_info;
struct netdev_notifier_bonding_info bonding_info;
} info;
}; };
int ice_init_lag(struct ice_pf *pf); int ice_init_lag(struct ice_pf *pf);
int ice_lag_move_node_sync(struct ice_hw *old_hw, struct ice_hw *new_hw,
struct ice_vsi *new_vsi,
struct iidc_rdma_qset_params *qset);
void ice_deinit_lag(struct ice_pf *pf); void ice_deinit_lag(struct ice_pf *pf);
struct ice_lag *ice_lag_find_primary(struct ice_lag *lag);
rx_handler_result_t ice_lag_nop_handler(struct sk_buff **pskb);
/**
* ice_disable_lag - increment LAG disable count
* @lag: LAG struct
*/
static inline void ice_disable_lag(struct ice_lag *lag)
{
/* If LAG on this PF is not already disabled, disable it */
rtnl_lock();
if (!netdev_is_rx_handler_busy(lag->netdev)) {
if (!netdev_rx_handler_register(lag->netdev,
ice_lag_nop_handler,
NULL))
lag->handler = true;
}
rtnl_unlock();
lag->dis_lag++;
}
/**
* ice_enable_lag - decrement disable count for a PF
* @lag: LAG struct
*
* Decrement the disable counter for a port, and if that count reaches
* zero, then remove the no-op Rx handler from that netdev
*/
static inline void ice_enable_lag(struct ice_lag *lag)
{
if (lag->dis_lag)
lag->dis_lag--;
if (!lag->dis_lag && lag->handler) {
rtnl_lock();
netdev_rx_handler_unregister(lag->netdev);
rtnl_unlock();
lag->handler = false;
}
}
/**
* ice_is_lag_dis - is LAG disabled
* @lag: LAG struct
*
* Return true if bonding is disabled
*/
static inline bool ice_is_lag_dis(struct ice_lag *lag)
{
return !!(lag->dis_lag);
}
#endif /* HAVE_NETDEV_UPPER_INFO */ #endif /* HAVE_NETDEV_UPPER_INFO */
#endif /* _ICE_LAG_H_ */ #endif /* _ICE_LAG_H_ */
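
The dis_lag field above acts as a plain disable refcount: any path that cannot tolerate bonding brackets itself with ice_disable_lag()/ice_enable_lag(). A hedged sketch of such a caller; demo_exclusive_op and its body are illustrative only.

/* Illustrative only: bracket a bonding-incompatible operation with the
 * LAG disable refcount.
 */
static int demo_exclusive_op(struct ice_lag *lag)
{
	int err = 0;

	ice_disable_lag(lag);	/* installs no-op rx handler, bumps count */
	/* ... work that must not race with bonding events ... */
	ice_enable_lag(lag);	/* drops count; unregisters handler at zero */
	return err;
}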

drivers/thirdparty/ice/ice_lan_tx_rx.h

@ -218,7 +218,6 @@ struct ice_fltr_desc {
(0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S) (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S)
#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL #define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL
enum ice_rx_desc_status_bits { enum ice_rx_desc_status_bits {
/* Note: These are predefined bit offsets */ /* Note: These are predefined bit offsets */
ICE_RX_DESC_STATUS_DD_S = 0, ICE_RX_DESC_STATUS_DD_S = 0,
@ -249,7 +248,6 @@ enum ice_rx_desc_status_bits {
#define ICE_RXD_QW1_STATUS_TSYNVALID_S ICE_RX_DESC_STATUS_TSYNVALID_S #define ICE_RXD_QW1_STATUS_TSYNVALID_S ICE_RX_DESC_STATUS_TSYNVALID_S
#define ICE_RXD_QW1_STATUS_TSYNVALID_M BIT_ULL(ICE_RXD_QW1_STATUS_TSYNVALID_S) #define ICE_RXD_QW1_STATUS_TSYNVALID_M BIT_ULL(ICE_RXD_QW1_STATUS_TSYNVALID_S)
enum ice_rx_desc_fltstat_values { enum ice_rx_desc_fltstat_values {
ICE_RX_DESC_FLTSTAT_NO_DATA = 0, ICE_RX_DESC_FLTSTAT_NO_DATA = 0,
ICE_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */ ICE_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
@ -257,7 +255,6 @@ enum ice_rx_desc_fltstat_values {
ICE_RX_DESC_FLTSTAT_RSS_HASH = 3, ICE_RX_DESC_FLTSTAT_RSS_HASH = 3,
}; };
#define ICE_RXD_QW1_ERROR_S 19 #define ICE_RXD_QW1_ERROR_S 19
#define ICE_RXD_QW1_ERROR_M (0xFFUL << ICE_RXD_QW1_ERROR_S) #define ICE_RXD_QW1_ERROR_M (0xFFUL << ICE_RXD_QW1_ERROR_S)
@ -356,7 +353,6 @@ enum ice_rx_ptype_payload_layer {
ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
}; };
#define ICE_RXD_QW1_LEN_PBUF_S 38 #define ICE_RXD_QW1_LEN_PBUF_S 38
#define ICE_RXD_QW1_LEN_PBUF_M (0x3FFFULL << ICE_RXD_QW1_LEN_PBUF_S) #define ICE_RXD_QW1_LEN_PBUF_M (0x3FFFULL << ICE_RXD_QW1_LEN_PBUF_S)
@ -366,7 +362,6 @@ enum ice_rx_ptype_payload_layer {
#define ICE_RXD_QW1_LEN_SPH_S 63 #define ICE_RXD_QW1_LEN_SPH_S 63
#define ICE_RXD_QW1_LEN_SPH_M BIT_ULL(ICE_RXD_QW1_LEN_SPH_S) #define ICE_RXD_QW1_LEN_SPH_M BIT_ULL(ICE_RXD_QW1_LEN_SPH_S)
enum ice_rx_desc_ext_status_bits { enum ice_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */ /* Note: These are predefined bit offsets */
ICE_RX_DESC_EXT_STATUS_L2TAG2P_S = 0, ICE_RX_DESC_EXT_STATUS_L2TAG2P_S = 0,
@ -377,7 +372,6 @@ enum ice_rx_desc_ext_status_bits {
ICE_RX_DESC_EXT_STATUS_PELONGB_S = 11, ICE_RX_DESC_EXT_STATUS_PELONGB_S = 11,
}; };
enum ice_rx_desc_pe_status_bits { enum ice_rx_desc_pe_status_bits {
/* Note: These are predefined bit offsets */ /* Note: These are predefined bit offsets */
ICE_RX_DESC_PE_STATUS_QPID_S = 0, /* 18 BITS */ ICE_RX_DESC_PE_STATUS_QPID_S = 0, /* 18 BITS */
@ -398,7 +392,6 @@ enum ice_rx_desc_pe_status_bits {
#define ICE_RX_PROG_STATUS_DESC_QW1_PROGID_M \ #define ICE_RX_PROG_STATUS_DESC_QW1_PROGID_M \
(0x7UL << ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S) (0x7UL << ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S)
#define ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S 19 #define ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S 19
#define ICE_RX_PROG_STATUS_DESC_QW1_ERROR_M \ #define ICE_RX_PROG_STATUS_DESC_QW1_ERROR_M \
(0x3FUL << ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S) (0x3FUL << ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S)
@ -642,7 +635,6 @@ struct ice_32b_rx_flex_desc_nic_2 {
} flex_ts; } flex_ts;
}; };
/* Receive Flex Descriptor profile IDs: There are a total /* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and * of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed * profiles 2-63 are flex profiles that can be programmed
@ -820,6 +812,14 @@ enum ice_rx_flex_desc_exstat_bits {
ICE_RX_FLEX_DESC_EXSTAT_OVERSIZE_S = 3, ICE_RX_FLEX_DESC_EXSTAT_OVERSIZE_S = 3,
}; };
/*
* For ice_32b_rx_flex_desc.ts_low:
* [0]: Timestamp-low validity bit
* [1:7]: Timestamp-low value
*/
#define ICE_RX_FLEX_DESC_TS_L_VALID_S 0x01
#define ICE_RX_FLEX_DESC_TS_L_VALID_M ICE_RX_FLEX_DESC_TS_L_VALID_S
#define ICE_RX_FLEX_DESC_TS_L_M 0xFE
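
Decoding follows directly from the masks: bit 0 says whether the low timestamp byte is valid, and bits 1:7 carry the value. A minimal sketch, assuming an ice_32b_rx_flex_desc pointer rx_desc is in scope and that the consumer wants the 7-bit value shifted down:

u8 ts_low = rx_desc->ts_low;

if (ts_low & ICE_RX_FLEX_DESC_TS_L_VALID_M) {
	u8 ts_val = (ts_low & ICE_RX_FLEX_DESC_TS_L_M) >> 1;
	/* ... combine ts_val with the rest of the timestamp ... */
}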
#define ICE_RXQ_CTX_SIZE_DWORDS 8 #define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) #define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
@ -967,6 +967,11 @@ struct ice_tx_ctx_desc {
__le64 qw1; __le64 qw1;
}; };
#define ICE_TX_GCS_DESC_START 0 /* 7 BITS */
#define ICE_TX_GCS_DESC_OFFSET 7 /* 4 BITS */
#define ICE_TX_GCS_DESC_TYPE 11 /* 2 BITS */
#define ICE_TX_GCS_DESC_ENA 13 /* 1 BIT */
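
Going by the widths noted in the comments (7-bit start, 4-bit offset, 2-bit type, one enable bit), a generic checksum (GCS) field would be composed by shifting each subfield into place. A hedged sketch; start, csum_off and type are hypothetical locals, and the exact field semantics are not spelled out in this diff:

u16 gcs = (start << ICE_TX_GCS_DESC_START) |
	  (csum_off << ICE_TX_GCS_DESC_OFFSET) |
	  (type << ICE_TX_GCS_DESC_TYPE) |
	  BIT(ICE_TX_GCS_DESC_ENA);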
#define ICE_TXD_CTX_QW1_DTYPE_S 0 #define ICE_TXD_CTX_QW1_DTYPE_S 0
#define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S) #define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S)
@ -1036,7 +1041,6 @@ enum ice_tx_ctx_desc_eipt_offload {
#define ICE_TXD_CTX_QW0_L4T_CS_S 23 #define ICE_TXD_CTX_QW0_L4T_CS_S 23
#define ICE_TXD_CTX_QW0_L4T_CS_M BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S) #define ICE_TXD_CTX_QW0_L4T_CS_M BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S)
#define ICE_LAN_TXQ_MAX_QGRPS 127 #define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023 #define ICE_LAN_TXQ_MAX_QDIS 1023
@ -1090,7 +1094,6 @@ struct ice_tx_cmpltnq {
u8 cmpl_type; u8 cmpl_type;
} __packed; } __packed;
/* LAN Tx Completion Queue Context */ /* LAN Tx Completion Queue Context */
struct ice_tx_cmpltnq_ctx { struct ice_tx_cmpltnq_ctx {
u64 base; u64 base;
@ -1118,7 +1121,6 @@ struct ice_tx_drbell_fmt {
u32 db; u32 db;
}; };
/* LAN Tx Doorbell Queue Context */ /* LAN Tx Doorbell Queue Context */
struct ice_tx_drbell_q_ctx { struct ice_tx_drbell_q_ctx {
u64 base; u64 base;
@ -1396,17 +1398,4 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
return ice_ptype_lkup[ptype]; return ice_ptype_lkup[ptype];
} }
#define ICE_LINK_SPEED_UNKNOWN 0
#define ICE_LINK_SPEED_10MBPS 10
#define ICE_LINK_SPEED_100MBPS 100
#define ICE_LINK_SPEED_1000MBPS 1000
#define ICE_LINK_SPEED_2500MBPS 2500
#define ICE_LINK_SPEED_5000MBPS 5000
#define ICE_LINK_SPEED_10000MBPS 10000
#define ICE_LINK_SPEED_20000MBPS 20000
#define ICE_LINK_SPEED_25000MBPS 25000
#define ICE_LINK_SPEED_40000MBPS 40000
#define ICE_LINK_SPEED_50000MBPS 50000
#define ICE_LINK_SPEED_100000MBPS 100000
#endif /* _ICE_LAN_TX_RX_H_ */ #endif /* _ICE_LAN_TX_RX_H_ */

File diff suppressed because it is too large

drivers/thirdparty/ice/ice_lib.h

@ -53,10 +53,13 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi); int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi);
int ice_get_valid_rss_size(struct ice_hw *hw, int new_size);
int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size);
struct ice_vsi * struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch, enum ice_vsi_type vsi_type, struct ice_vf *vf,
u8 tc); struct ice_channel *ch, u8 tc);
void ice_napi_del(struct ice_vsi *vsi); void ice_napi_del(struct ice_vsi *vsi);
@ -90,14 +93,14 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable);
void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
int ice_status_to_errno(enum ice_status err);
void void
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
bool __maybe_unused ena_ts); bool __maybe_unused ena_ts);
@ -109,15 +112,15 @@ irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data);
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr); void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
void ice_vsi_get_q_vector_q_base(struct ice_vsi *vsi, u16 vector_id, u16 *txq,
u16 *rxq);
enum ice_status int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf); bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_peer_ena(struct ice_pf *pf); bool ice_is_aux_ena(struct ice_pf *pf);
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw); bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi);
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); int ice_set_dflt_vsi(struct ice_vsi *vsi);
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); int ice_clear_dflt_vsi(struct ice_vsi *vsi);
int ice_clear_dflt_vsi(struct ice_sw *sw);
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate); int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate);
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate); int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate);
int ice_get_link_speed_kbps(struct ice_vsi *vsi); int ice_get_link_speed_kbps(struct ice_vsi *vsi);
@ -130,6 +133,7 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
#endif /* HAVE_METADATA_PORT_INFO */ #endif /* HAVE_METADATA_PORT_INFO */
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx); void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx); void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
int ice_check_mtu_valid(struct net_device *netdev, int new_mtu);
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi); int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi); int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi); bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);

File diff suppressed because it is too large

drivers/thirdparty/ice/ice_metainit.c vendored Normal file

@ -0,0 +1,155 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice_common.h"
#include "ice_parser_util.h"
#define ICE_METAINIT_TABLE_SIZE 16
/**
 * ice_metainit_dump - dump a metainit item's info
* @hw: pointer to the hardware structure
* @item: metainit item to dump
*/
void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item)
{
dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
dev_info(ice_hw_to_dev(hw), "tsr = %d\n", item->tsr);
dev_info(ice_hw_to_dev(hw), "ho = %d\n", item->ho);
dev_info(ice_hw_to_dev(hw), "pc = %d\n", item->pc);
dev_info(ice_hw_to_dev(hw), "pg_rn = %d\n", item->pg_rn);
dev_info(ice_hw_to_dev(hw), "cd = %d\n", item->cd);
dev_info(ice_hw_to_dev(hw), "gpr_a_ctrl = %d\n", item->gpr_a_ctrl);
dev_info(ice_hw_to_dev(hw), "gpr_a_data_mdid = %d\n",
item->gpr_a_data_mdid);
dev_info(ice_hw_to_dev(hw), "gpr_a_data_start = %d\n",
item->gpr_a_data_start);
dev_info(ice_hw_to_dev(hw), "gpr_a_data_len = %d\n",
item->gpr_a_data_len);
dev_info(ice_hw_to_dev(hw), "gpr_a_id = %d\n", item->gpr_a_id);
dev_info(ice_hw_to_dev(hw), "gpr_b_ctrl = %d\n", item->gpr_b_ctrl);
dev_info(ice_hw_to_dev(hw), "gpr_b_data_mdid = %d\n",
item->gpr_b_data_mdid);
dev_info(ice_hw_to_dev(hw), "gpr_b_data_start = %d\n",
item->gpr_b_data_start);
dev_info(ice_hw_to_dev(hw), "gpr_b_data_len = %d\n",
item->gpr_b_data_len);
dev_info(ice_hw_to_dev(hw), "gpr_b_id = %d\n", item->gpr_b_id);
dev_info(ice_hw_to_dev(hw), "gpr_c_ctrl = %d\n", item->gpr_c_ctrl);
dev_info(ice_hw_to_dev(hw), "gpr_c_data_mdid = %d\n",
item->gpr_c_data_mdid);
dev_info(ice_hw_to_dev(hw), "gpr_c_data_start = %d\n",
item->gpr_c_data_start);
dev_info(ice_hw_to_dev(hw), "gpr_c_data_len = %d\n",
item->gpr_c_data_len);
dev_info(ice_hw_to_dev(hw), "gpr_c_id = %d\n", item->gpr_c_id);
dev_info(ice_hw_to_dev(hw), "gpr_d_ctrl = %d\n", item->gpr_d_ctrl);
dev_info(ice_hw_to_dev(hw), "gpr_d_data_mdid = %d\n",
item->gpr_d_data_mdid);
dev_info(ice_hw_to_dev(hw), "gpr_d_data_start = %d\n",
item->gpr_d_data_start);
dev_info(ice_hw_to_dev(hw), "gpr_d_data_len = %d\n",
item->gpr_d_data_len);
dev_info(ice_hw_to_dev(hw), "gpr_d_id = %d\n", item->gpr_d_id);
dev_info(ice_hw_to_dev(hw), "flags = 0x%llx\n",
(unsigned long long)(item->flags));
}
/** The function parses a 192-bit Metadata Init entry with the following format:
* BIT 0-7: TCAM Search Key Register (mi->tsr)
* BIT 8-16: Header Offset (mi->ho)
* BIT 17-24: Program Counter (mi->pc)
* BIT 25-35: Parse Graph Root Node (mi->pg_rn)
* BIT 36-38: Control Domain (mi->cd)
* BIT 39: GPR_A Data Control (mi->gpr_a_ctrl)
* BIT 40-44: GPR_A MDID.ID (mi->gpr_a_data_mdid)
* BIT 45-48: GPR_A MDID.START (mi->gpr_a_data_start)
* BIT 49-53: GPR_A MDID.LEN (mi->gpr_a_data_len)
* BIT 54-55: reserved
* BIT 56-59: GPR_A ID (mi->gpr_a_id)
* BIT 60: GPR_B Data Control (mi->gpr_b_ctrl)
* BIT 61-65: GPR_B MDID.ID (mi->gpr_b_data_mdid)
* BIT 66-69: GPR_B MDID.START (mi->gpr_b_data_start)
* BIT 70-74: GPR_B MDID.LEN (mi->gpr_b_data_len)
* BIT 75-76: reserved
 * BIT 77-80: GPR_B ID (mi->gpr_b_id)
* BIT 81: GPR_C Data Control (mi->gpr_c_ctrl)
* BIT 82-86: GPR_C MDID.ID (mi->gpr_c_data_mdid)
* BIT 87-90: GPR_C MDID.START (mi->gpr_c_data_start)
* BIT 91-95: GPR_C MDID.LEN (mi->gpr_c_data_len)
* BIT 96-97: reserved
* BIT 98-101: GPR_C ID (mi->gpr_c_id)
* BIT 102: GPR_D Data Control (mi->gpr_d_ctrl)
 * BIT 103-107: GPR_D MDID.ID (mi->gpr_d_data_mdid)
 * BIT 108-111: GPR_D MDID.START (mi->gpr_d_data_start)
 * BIT 112-116: GPR_D MDID.LEN (mi->gpr_d_data_len)
 * BIT 117-118: reserved
 * BIT 119-122: GPR_D ID (mi->gpr_d_id)
 * BIT 123-186: Flags (mi->flags)
 * BIT 187-191: reserved
*/
static void _metainit_parse_item(struct ice_hw *hw, u16 idx, void *item,
void *data, int size)
{
struct ice_metainit_item *mi = (struct ice_metainit_item *)item;
u8 *buf = (u8 *)data;
u64 d64;
mi->idx = idx;
d64 = *(u64 *)buf;
mi->tsr = (u8)(d64 & 0xff);
mi->ho = (u16)((d64 >> 8) & 0x1ff);
mi->pc = (u16)((d64 >> 17) & 0xff);
mi->pg_rn = (u16)((d64 >> 25) & 0x3ff);
mi->cd = (u16)((d64 >> 36) & 0x7);
mi->gpr_a_ctrl = ((d64 >> 39) & 0x1) != 0;
mi->gpr_a_data_mdid = (u8)((d64 >> 40) & 0x1f);
mi->gpr_a_data_start = (u8)((d64 >> 45) & 0xf);
mi->gpr_a_data_len = (u8)((d64 >> 49) & 0x1f);
mi->gpr_a_id = (u8)((d64 >> 56) & 0xf);
d64 = *(u64 *)&buf[7] >> 4;
mi->gpr_b_ctrl = (d64 & 0x1) != 0;
mi->gpr_b_data_mdid = (u8)((d64 >> 1) & 0x1f);
mi->gpr_b_data_start = (u8)((d64 >> 6) & 0xf);
mi->gpr_b_data_len = (u8)((d64 >> 10) & 0x1f);
mi->gpr_b_id = (u8)((d64 >> 17) & 0xf);
mi->gpr_c_ctrl = ((d64 >> 21) & 0x1) != 0;
mi->gpr_c_data_mdid = (u8)((d64 >> 22) & 0x1f);
mi->gpr_c_data_start = (u8)((d64 >> 27) & 0xf);
mi->gpr_c_data_len = (u8)((d64 >> 31) & 0x1f);
mi->gpr_c_id = (u8)((d64 >> 38) & 0xf);
mi->gpr_d_ctrl = ((d64 >> 42) & 0x1) != 0;
mi->gpr_d_data_mdid = (u8)((d64 >> 43) & 0x1f);
mi->gpr_d_data_start = (u8)((d64 >> 48) & 0xf);
mi->gpr_d_data_len = (u8)((d64 >> 52) & 0x1f);
d64 = *(u64 *)&buf[14] >> 7;
mi->gpr_d_id = (u8)(d64 & 0xf);
d64 = *(u64 *)&buf[15] >> 3;
mi->flags = d64;
d64 = ((*(u64 *)&buf[16] >> 56) & 0x7);
mi->flags |= (d64 << 61);
if (hw->debug_mask & ICE_DBG_PARSER)
ice_metainit_dump(hw, mi);
}
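
The routine above open-codes one pattern per field: read an unaligned u64 at the byte containing the field's start bit, shift the remainder of the bit offset away, and mask (for example, bit 60 lives in byte 7, so buf[7] is shifted right by 4). A generic, illustrative equivalent; demo_get_bits is hypothetical and valid for widths below 64:

#include <linux/bits.h>

/* Illustrative only: extract 'len' bits (len < 64) starting at absolute
 * bit position 'pos' from a little-endian byte buffer. The caller must
 * guarantee 8 readable bytes at buf[pos / 8], as the driver code does.
 */
static u64 demo_get_bits(const u8 *buf, int pos, int len)
{
	u64 d64 = *(const u64 *)&buf[pos / 8] >> (pos % 8);

	return d64 & (BIT_ULL(len) - 1);
}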
/**
* ice_metainit_table_get - create a metainit table
* @hw: pointer to the hardware structure
*/
struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw)
{
return (struct ice_metainit_item *)
ice_parser_create_table(hw, ICE_SID_RXPARSER_METADATA_INIT,
sizeof(struct ice_metainit_item),
ICE_METAINIT_TABLE_SIZE,
ice_parser_sect_item_get,
_metainit_parse_item, false);
}

drivers/thirdparty/ice/ice_metainit.h vendored Normal file

@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_METAINIT_H_
#define _ICE_METAINIT_H_
struct ice_metainit_item {
u16 idx;
u8 tsr;
u16 ho;
u16 pc;
u16 pg_rn;
u8 cd;
bool gpr_a_ctrl;
u8 gpr_a_data_mdid;
u8 gpr_a_data_start;
u8 gpr_a_data_len;
u8 gpr_a_id;
bool gpr_b_ctrl;
u8 gpr_b_data_mdid;
u8 gpr_b_data_start;
u8 gpr_b_data_len;
u8 gpr_b_id;
bool gpr_c_ctrl;
u8 gpr_c_data_mdid;
u8 gpr_c_data_start;
u8 gpr_c_data_len;
u8 gpr_c_id;
bool gpr_d_ctrl;
u8 gpr_d_data_mdid;
u8 gpr_d_data_start;
u8 gpr_d_data_len;
u8 gpr_d_id;
u64 flags;
};
void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item);
struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw);
#endif /*_ICE_METAINIT_H_ */

drivers/thirdparty/ice/ice_migration.c vendored Normal file

@ -0,0 +1,25 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice.h"
/**
 * ice_migration_get_vf - get the ice VF structure pointer by pdev
 * @vf_pdev: pointer to the ice VF's PCI device structure
 *
 * Return a pointer to the VF on success, NULL on failure.
*/
void *ice_migration_get_vf(struct pci_dev *vf_pdev)
{
struct pci_dev *pf_pdev = vf_pdev->physfn;
int vf_id = pci_iov_vf_id(vf_pdev);
struct ice_pf *pf;
if (!pf_pdev || vf_id < 0)
return NULL;
pf = pci_get_drvdata(pf_pdev);
return ice_get_vf_by_id(pf, vf_id);
}
EXPORT_SYMBOL(ice_migration_get_vf);
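
Since the symbol is exported, an out-of-driver consumer (for instance a live-migration host module) can resolve the VF handle straight from the VF's pci_dev. A hedged sketch; demo_probe is hypothetical:

/* Illustrative only: resolve and stash the VF handle during probe */
static int demo_probe(struct pci_dev *vf_pdev)
{
	void *vf = ice_migration_get_vf(vf_pdev);

	if (!vf)
		return -ENODEV;
	/* ... keep 'vf' for later migration operations ... */
	return 0;
}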

drivers/thirdparty/ice/ice_migration.h vendored Normal file

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_MIGRATION_H_
#define _ICE_MIGRATION_H_
#include "kcompat.h"
#if IS_ENABLED(CONFIG_VFIO_PCI_CORE) && defined(HAVE_LMV1_SUPPORT)
void *ice_migration_get_vf(struct pci_dev *vf_pdev);
#else
static inline void *ice_migration_get_vf(struct pci_dev *vf_pdev)
{
return NULL;
}
#endif /* CONFIG_VFIO_PCI_CORE && HAVE_LMV1_SUPPORT */
#endif /* _ICE_MIGRATION_H_ */

drivers/thirdparty/ice/ice_mk_grp.c vendored Normal file

@ -0,0 +1,54 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice_common.h"
#include "ice_parser_util.h"
#define ICE_MK_GRP_TABLE_SIZE 128
#define ICE_MK_COUNT_PER_GRP 8
/**
 * ice_mk_grp_dump - dump a marker group item's info
* @hw: pointer to the hardware structure
* @item: marker group item to dump
*/
void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item)
{
int i;
dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
dev_info(ice_hw_to_dev(hw), "markers: ");
for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++)
dev_info(ice_hw_to_dev(hw), "%d ", item->markers[i]);
dev_info(ice_hw_to_dev(hw), "\n");
}
static void _mk_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
void *data, int size)
{
struct ice_mk_grp_item *grp = (struct ice_mk_grp_item *)item;
u8 *buf = (u8 *)data;
int i;
grp->idx = idx;
for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++)
grp->markers[i] = buf[i];
if (hw->debug_mask & ICE_DBG_PARSER)
ice_mk_grp_dump(hw, grp);
}
/**
* ice_mk_grp_table_get - create a marker group table
* @hw: pointer to the hardware structure
*/
struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw)
{
return (struct ice_mk_grp_item *)
ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_GRP,
sizeof(struct ice_mk_grp_item),
ICE_MK_GRP_TABLE_SIZE,
ice_parser_sect_item_get,
_mk_grp_parse_item, false);
}

drivers/thirdparty/ice/ice_mk_grp.h vendored Normal file

@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_MK_GRP_H_
#define _ICE_MK_GRP_H_
struct ice_mk_grp_item {
int idx;
u8 markers[8];
};
void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item);
struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw);
#endif /* _ICE_MK_GRP_H_ */

drivers/thirdparty/ice/ice_nvm.c

@ -3,6 +3,7 @@
#include "ice_common.h" #include "ice_common.h"
#define GL_MNG_DEF_DEVID 0x000B611C
/** /**
* ice_aq_read_nvm * ice_aq_read_nvm
@ -17,7 +18,7 @@
* *
* Read the NVM using the admin queue commands (0x0701) * Read the NVM using the admin queue commands (0x0701)
*/ */
static enum ice_status int
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram, void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd) struct ice_sq_cd *cd)
@ -28,7 +29,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
cmd = &desc.params.nvm; cmd = &desc.params.nvm;
if (offset > ICE_AQC_NVM_MAX_OFFSET) if (offset > ICE_AQC_NVM_MAX_OFFSET)
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
@ -61,21 +62,21 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
* Returns a status code on failure. Note that the data pointer may be * Returns a status code on failure. Note that the data pointer may be
* partially updated if some reads succeed before a failure. * partially updated if some reads succeed before a failure.
*/ */
enum ice_status int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram) bool read_shadow_ram)
{ {
enum ice_status status;
u32 inlen = *length; u32 inlen = *length;
u32 bytes_read = 0; u32 bytes_read = 0;
bool last_cmd; bool last_cmd;
int status;
*length = 0; *length = 0;
/* Verify the length of the read if this is for the Shadow RAM */ /* Verify the length of the read if this is for the Shadow RAM */
if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) { if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM, "NVM error: requested data is beyond Shadow RAM limit\n"); ice_debug(hw, ICE_DBG_NVM, "NVM error: requested data is beyond Shadow RAM limit\n");
return ICE_ERR_PARAM; return -EINVAL;
} }
do { do {
@ -124,7 +125,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
* *
* Update the NVM using the admin queue commands (0x0703) * Update the NVM using the admin queue commands (0x0703)
*/ */
enum ice_status int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags, u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd) struct ice_sq_cd *cd)
@ -136,7 +137,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
/* In offset the highest byte must be zeroed. */ /* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) if (offset & 0xFF000000)
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
@ -163,12 +164,12 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
* *
* Erase the NVM sector using the admin queue commands (0x0702) * Erase the NVM sector using the admin queue commands (0x0702)
*/ */
enum ice_status int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{ {
struct ice_aq_desc desc; struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd; struct ice_aqc_nvm *cmd;
enum ice_status status; int status;
__le16 len; __le16 len;
/* read a length value from SR, so module_typeid is equal to 0 */ /* read a length value from SR, so module_typeid is equal to 0 */
@ -191,7 +192,6 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
} }
/** /**
* ice_read_sr_word_aq - Reads Shadow RAM via AQ * ice_read_sr_word_aq - Reads Shadow RAM via AQ
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
@ -200,12 +200,12 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
* *
* Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm. * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
*/ */
static enum ice_status static int
ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{ {
u32 bytes = sizeof(u16); u32 bytes = sizeof(u16);
enum ice_status status;
__le16 data_local; __le16 data_local;
int status;
/* Note that ice_read_flat_nvm checks if the read is past the Shadow /* Note that ice_read_flat_nvm checks if the read is past the Shadow
* RAM size, and ensures we don't read across a Shadow RAM sector * RAM size, and ensures we don't read across a Shadow RAM sector
@ -220,7 +220,6 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
return 0; return 0;
} }
/** /**
* ice_acquire_nvm - Generic request for acquiring the NVM ownership * ice_acquire_nvm - Generic request for acquiring the NVM ownership
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
@ -228,7 +227,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
* *
* This function will request NVM ownership. * This function will request NVM ownership.
*/ */
enum ice_status int
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{ {
if (hw->flash.blank_nvm_mode) if (hw->flash.blank_nvm_mode)
@ -336,18 +335,18 @@ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select ban
* hw->flash.banks data being setup by ice_determine_active_flash_banks() * hw->flash.banks data being setup by ice_determine_active_flash_banks()
* during initialization. * during initialization.
*/ */
static enum ice_status static int
ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module, ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
u32 offset, u8 *data, u32 length) u32 offset, u8 *data, u32 length)
{ {
enum ice_status status; int status;
u32 start; u32 start;
start = ice_get_flash_bank_offset(hw, bank, module); start = ice_get_flash_bank_offset(hw, bank, module);
if (!start) { if (!start) {
ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n", ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n",
module); module);
return ICE_ERR_PARAM; return -EINVAL;
} }
status = ice_acquire_nvm(hw, ICE_RES_READ); status = ice_acquire_nvm(hw, ICE_RES_READ);
@ -371,11 +370,11 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
* Read the specified word from the active NVM module. This includes the CSS * Read the specified word from the active NVM module. This includes the CSS
* header at the start of the NVM module. * header at the start of the NVM module.
*/ */
static enum ice_status static int
ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{ {
enum ice_status status;
__le16 data_local; __le16 data_local;
int status;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16), status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16),
(__force u8 *)&data_local, sizeof(u16)); (__force u8 *)&data_local, sizeof(u16));
@ -385,6 +384,42 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
return status; return status;
} }
/**
* ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
* @hw: pointer to the HW struct
* @bank: whether to read from the active or inactive flash bank
* @hdr_len: storage for header length in words
*
 * Read the CSS header length from the NVM CSS header, convert it to words,
 * and add the Authentication header size (also in words).
*/
static int
ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
u32 *hdr_len)
{
u16 hdr_len_l, hdr_len_h;
u32 hdr_len_dword;
int status;
status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
&hdr_len_l);
if (status)
return status;
status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
&hdr_len_h);
if (status)
return status;
/* CSS header length is in DWORD, so convert to words and add
* authentication header size
*/
hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
*hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
return 0;
}
/** /**
* ice_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank * ice_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
@ -395,10 +430,19 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
* Read the specified word from the copy of the Shadow RAM found in the * Read the specified word from the copy of the Shadow RAM found in the
* specified NVM module. * specified NVM module.
*/ */
static enum ice_status static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{ {
return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data); u32 hdr_len;
int status;
status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len);
if (status)
return status;
hdr_len = roundup(hdr_len, 32);
return ice_read_nvm_module(hw, bank, hdr_len + offset, data);
} }
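
Putting the two helpers together: if the CSS length field reads back as n_dwords DWORDs, the Shadow RAM copy begins roundup(n_dwords * 2 + ICE_NVM_AUTH_HEADER_LEN, 32) words into the bank, and the caller's word offset is added on top. As a sketch with hypothetical inputs n_dwords and offset:

u32 hdr_words = roundup(n_dwords * 2 + ICE_NVM_AUTH_HEADER_LEN, 32);
u32 sr_copy_word = hdr_words + offset;	/* word passed to ice_read_nvm_module() */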
/** /**
@ -412,11 +456,11 @@ ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u
* Note that unlike the NVM module, the CSS data is stored at the end of the * Note that unlike the NVM module, the CSS data is stored at the end of the
* module instead of at the beginning. * module instead of at the beginning.
*/ */
static enum ice_status static int
ice_read_orom_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) ice_read_orom_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{ {
enum ice_status status;
__le16 data_local; __le16 data_local;
int status;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, offset * sizeof(u16), status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, offset * sizeof(u16),
(__force u8 *)&data_local, sizeof(u16)); (__force u8 *)&data_local, sizeof(u16));
@ -435,11 +479,11 @@ ice_read_orom_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u
* *
* Read a word from the specified netlist bank. * Read a word from the specified netlist bank.
*/ */
static enum ice_status static int
ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{ {
enum ice_status status;
__le16 data_local; __le16 data_local;
int status;
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16), status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16),
(__force u8 *)&data_local, sizeof(u16)); (__force u8 *)&data_local, sizeof(u16));
@ -457,9 +501,9 @@ ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset
* *
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/ */
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{ {
enum ice_status status; int status;
status = ice_acquire_nvm(hw, ICE_RES_READ); status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) { if (!status) {
@ -481,13 +525,13 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
* Area (PFA) and returns the TLV pointer and length. The caller can * Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value. * use these to read the variable length TLV value.
*/ */
enum ice_status int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type) u16 module_type)
{ {
enum ice_status status;
u16 pfa_len, pfa_ptr; u16 pfa_len, pfa_ptr;
u16 next_tlv; u16 next_tlv;
int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
if (status) { if (status) {
@ -525,7 +569,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*module_tlv_len = tlv_len; *module_tlv_len = tlv_len;
return 0; return 0;
} }
return ICE_ERR_INVAL_SIZE; return -EINVAL;
} }
/* Check next TLV, i.e. current TLV pointer + length + 2 words /* Check next TLV, i.e. current TLV pointer + length + 2 words
* (for current TLV's type and length) * (for current TLV's type and length)
@ -533,7 +577,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
next_tlv = next_tlv + tlv_len + 2; next_tlv = next_tlv + tlv_len + 2;
} }
/* Module does not exist */ /* Module does not exist */
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
} }
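
The PFA scan above is a conventional type/length walk: each TLV is one type word, one length word, then 'length' words of value, so the next header sits at current + length + 2. A self-contained sketch of the same walk over an in-memory word array; demo_find_tlv and its arguments are illustrative:

#include <linux/errno.h>

/* Illustrative only: scan 'nwords' 16-bit words laid out as
 * [type][len][len value words]... and return the offset of the first
 * value matching 'want', or -ENOENT if no such TLV exists.
 */
static int demo_find_tlv(const u16 *words, u16 nwords, u16 want)
{
	u16 next = 0;

	while (next + 2 <= nwords) {
		u16 type = words[next];
		u16 len = words[next + 1];

		if (type == want)
			return next + 2;	/* start of the value */
		next = next + len + 2;
	}
	return -ENOENT;
}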
/** /**
@ -544,12 +588,12 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
* *
* Reads the part number string from the NVM. * Reads the part number string from the NVM.
*/ */
enum ice_status int
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{ {
u16 pba_tlv, pba_tlv_len; u16 pba_tlv, pba_tlv_len;
enum ice_status status;
u16 pba_word, pba_size; u16 pba_word, pba_size;
int status;
u16 i; u16 i;
status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
@ -568,7 +612,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
if (pba_tlv_len < pba_size) { if (pba_tlv_len < pba_size) {
ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n"); ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
return ICE_ERR_INVAL_SIZE; return -EINVAL;
} }
/* Subtract one to get PBA word count (PBA Size word is included in /* Subtract one to get PBA word count (PBA Size word is included in
@ -577,7 +621,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
pba_size--; pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) { if (pba_num_size < (((u32)pba_size * 2) + 1)) {
ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n"); ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
return ICE_ERR_PARAM; return -EINVAL;
} }
for (i = 0; i < pba_size; i++) { for (i = 0; i < pba_size; i++) {
@ -604,10 +648,10 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
* Read the security revision out of the CSS header of the active NVM module * Read the security revision out of the CSS header of the active NVM module
* bank. * bank.
*/ */
static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev) static int ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
{ {
enum ice_status status;
u16 srev_l, srev_h; u16 srev_l, srev_h;
int status;
status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_SREV_L, &srev_l); status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_SREV_L, &srev_l);
if (status) if (status)
@ -631,11 +675,11 @@ static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select
* Read the NVM EETRACK ID and map version of the main NVM image bank, filling * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
* in the nvm info structure. * in the nvm info structure.
*/ */
static enum ice_status static int
ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm) ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm)
{ {
u16 eetrack_lo, eetrack_hi, ver; u16 eetrack_lo, eetrack_hi, ver;
enum ice_status status; int status;
status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver); status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) { if (status) {
@ -675,7 +719,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv
* inactive NVM bank. Used to access version data for a pending update that * inactive NVM bank. Used to access version data for a pending update that
* has not yet been activated. * has not yet been activated.
*/ */
enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm) int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
{ {
return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm); return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm);
} }
@ -689,24 +733,28 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info
* Read the security revision out of the CSS header of the active OROM module * Read the security revision out of the CSS header of the active OROM module
* bank. * bank.
*/ */
static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev) static int ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
{ {
enum ice_status status; u32 orom_size_word = hw->flash.banks.orom_size / 2;
u16 srev_l, srev_h; u16 srev_l, srev_h;
u32 css_start; u32 css_start;
u32 hdr_len;
int status;
if (hw->flash.banks.orom_size < ICE_NVM_OROM_TRAILER_LENGTH) { status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len);
if (status)
return status;
if (orom_size_word < hdr_len) {
ice_debug(hw, ICE_DBG_NVM, "Unexpected Option ROM Size of %u\n", ice_debug(hw, ICE_DBG_NVM, "Unexpected Option ROM Size of %u\n",
hw->flash.banks.orom_size); hw->flash.banks.orom_size);
return ICE_ERR_CFG; return -EIO;
} }
/* calculate how far into the Option ROM the CSS header starts. Note /* calculate how far into the Option ROM the CSS header starts. Note
* that ice_read_orom_module takes a word offset so we need to * that ice_read_orom_module takes a word offset
* divide by 2 here.
*/ */
css_start = (hw->flash.banks.orom_size - ICE_NVM_OROM_TRAILER_LENGTH) / 2; css_start = orom_size_word - hdr_len;
status = ice_read_orom_module(hw, bank, css_start + ICE_NVM_CSS_SREV_L, &srev_l); status = ice_read_orom_module(hw, bank, css_start + ICE_NVM_CSS_SREV_L, &srev_l);
if (status) if (status)
return status; return status;
@ -729,12 +777,11 @@ static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select
* Searches through the Option ROM flash contents to locate the CIVD data for * Searches through the Option ROM flash contents to locate the CIVD data for
* the image. * the image.
*/ */
static enum ice_status static int
ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_orom_civd_info *civd) struct ice_orom_civd_info *civd)
{ {
struct ice_orom_civd_info tmp; struct ice_orom_civd_info tmp;
enum ice_status status;
u32 offset; u32 offset;
/* The CIVD section is located in the Option ROM aligned to 512 bytes. /* The CIVD section is located in the Option ROM aligned to 512 bytes.
@ -744,6 +791,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
*/ */
for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) { for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
u8 sum = 0, i; u8 sum = 0, i;
int status;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
offset, (u8 *)&tmp, sizeof(tmp)); offset, (u8 *)&tmp, sizeof(tmp));
@ -758,20 +806,22 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
/* Verify that the simple checksum is zero */ /* Verify that the simple checksum is zero */
for (i = 0; i < sizeof(tmp); i++) for (i = 0; i < sizeof(tmp); i++)
#ifdef __CHECKER__
/* cppcheck-suppress objectIndex */ /* cppcheck-suppress objectIndex */
#endif /* __CHECKER__ */
sum += ((u8 *)&tmp)[i]; sum += ((u8 *)&tmp)[i];
if (sum) { if (sum) {
ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n", ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
sum); sum);
return ICE_ERR_NVM; return -EIO;
} }
*civd = tmp; *civd = tmp;
return 0; return 0;
} }
return ICE_ERR_NVM; return -EIO;
} }
/** /**
@ -783,12 +833,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
* Read Option ROM version and security revision from the Option ROM flash * Read Option ROM version and security revision from the Option ROM flash
* section. * section.
*/ */
static enum ice_status static int
ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom) ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom)
{ {
struct ice_orom_civd_info civd; struct ice_orom_civd_info civd;
enum ice_status status;
u32 combo_ver; u32 combo_ver;
int status;
status = ice_get_orom_civd_data(hw, bank, &civd); status = ice_get_orom_civd_data(hw, bank, &civd);
if (status) { if (status) {
@ -820,7 +870,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o
* section of flash. Used to access version data for a pending update that has * section of flash. Used to access version data for a pending update that has
* not yet been activated. * not yet been activated.
*/ */
enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom) int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
{ {
return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom); return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom);
} }
@ -835,13 +885,13 @@ enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_inf
* Topology section to find the Netlist ID block and extract the relevant * Topology section to find the Netlist ID block and extract the relevant
* information into the netlist version structure. * information into the netlist version structure.
*/ */
static enum ice_status static int
ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_netlist_info *netlist) struct ice_netlist_info *netlist)
{ {
u16 module_id, length, node_count, i; u16 module_id, length, node_count, i;
enum ice_status status;
u16 *id_blk; u16 *id_blk;
int status;
status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id); status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id);
if (status) if (status)
@ -850,7 +900,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) { if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) {
ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n", ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n",
ICE_NETLIST_LINK_TOPO_MOD_ID, module_id); ICE_NETLIST_LINK_TOPO_MOD_ID, module_id);
return ICE_ERR_NVM; return -EIO;
} }
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length); status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length);
@ -861,7 +911,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (length < ICE_NETLIST_ID_BLK_SIZE) { if (length < ICE_NETLIST_ID_BLK_SIZE) {
ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n", ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n",
ICE_NETLIST_ID_BLK_SIZE, length); ICE_NETLIST_ID_BLK_SIZE, length);
return ICE_ERR_NVM; return -EIO;
} }
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count); status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count);
@ -872,7 +922,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
id_blk = devm_kcalloc(ice_hw_to_dev(hw), ICE_NETLIST_ID_BLK_SIZE, id_blk = devm_kcalloc(ice_hw_to_dev(hw), ICE_NETLIST_ID_BLK_SIZE,
sizeof(*id_blk), GFP_KERNEL); sizeof(*id_blk), GFP_KERNEL);
if (!id_blk) if (!id_blk)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Read out the entire Netlist ID Block at once. */ /* Read out the entire Netlist ID Block at once. */
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR,
@ -903,7 +953,6 @@ exit_error:
return status; return status;
} }
/** /**
* ice_get_inactive_netlist_ver * ice_get_inactive_netlist_ver
* @hw: pointer to the HW struct * @hw: pointer to the HW struct
@ -913,7 +962,7 @@ exit_error:
* extract version data of a pending flash update in order to display the * extract version data of a pending flash update in order to display the
* version data. * version data.
*/ */
enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist) int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
{ {
return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist); return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist);
} }
@ -926,10 +975,10 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli
* the actual size is smaller. Use bisection to determine the accessible size * the actual size is smaller. Use bisection to determine the accessible size
* of flash memory. * of flash memory.
*/ */
static enum ice_status ice_discover_flash_size(struct ice_hw *hw) static int ice_discover_flash_size(struct ice_hw *hw)
{ {
u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1; u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
enum ice_status status; int status;
status = ice_acquire_nvm(hw, ICE_RES_READ); status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status) if (status)
@ -941,7 +990,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
u8 data; u8 data;
status = ice_read_flat_nvm(hw, offset, &len, &data, false); status = ice_read_flat_nvm(hw, offset, &len, &data, false);
if (status == ICE_ERR_AQ_ERROR && if (status == -EIO &&
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n", ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset); __func__, offset);
@ -981,10 +1030,10 @@ err_read_flat_nvm:
* sector size by using the highest bit. The reported pointer value will be in * sector size by using the highest bit. The reported pointer value will be in
* bytes, intended for flat NVM reads. * bytes, intended for flat NVM reads.
*/ */
static enum ice_status static int
ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
{ {
enum ice_status status; int status;
u16 value; u16 value;
status = ice_read_sr_word(hw, offset, &value); status = ice_read_sr_word(hw, offset, &value);
@ -1013,10 +1062,10 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
* Each area size word is specified in 4KB sector units. This function reports * Each area size word is specified in 4KB sector units. This function reports
* the size in bytes, intended for flat NVM reads. * the size in bytes, intended for flat NVM reads.
*/ */
static enum ice_status static int
ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
{ {
enum ice_status status; int status;
u16 value; u16 value;
status = ice_read_sr_word(hw, offset, &value); status = ice_read_sr_word(hw, offset, &value);
@ -1039,12 +1088,12 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
* structure for later use in order to calculate the correct offset to read * structure for later use in order to calculate the correct offset to read
* from the active module. * from the active module.
*/ */
static enum ice_status static int
ice_determine_active_flash_banks(struct ice_hw *hw) ice_determine_active_flash_banks(struct ice_hw *hw)
{ {
struct ice_bank_info *banks = &hw->flash.banks; struct ice_bank_info *banks = &hw->flash.banks;
enum ice_status status;
u16 ctrl_word; u16 ctrl_word;
int status;
status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word); status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word);
if (status) { if (status) {
@ -1055,7 +1104,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
/* Check that the control word indicates validity */ /* Check that the control word indicates validity */
if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) { if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n"); ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
return ICE_ERR_CFG; return -EIO;
} }
if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK)) if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK))
@ -1119,12 +1168,12 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
* This function reads and populates NVM settings such as Shadow RAM size, * This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode * max_timeout, and blank_nvm_mode
*/ */
enum ice_status ice_init_nvm(struct ice_hw *hw) int ice_init_nvm(struct ice_hw *hw)
{ {
struct ice_flash_info *flash = &hw->flash; struct ice_flash_info *flash = &hw->flash;
enum ice_status status;
u32 fla, gens_stat; u32 fla, gens_stat;
u8 sr_size; u8 sr_size;
int status;
/* The SR size is stored regardless of the NVM programming mode /* The SR size is stored regardless of the NVM programming mode
* as the blank mode may be used in the factory line. * as the blank mode may be used in the factory line.
@ -1143,7 +1192,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
/* Blank programming mode */ /* Blank programming mode */
flash->blank_nvm_mode = true; flash->blank_nvm_mode = true;
ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n"); ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
return ICE_ERR_NVM_BLANK_MODE; return -EIO;
} }
status = ice_discover_flash_size(hw); status = ice_discover_flash_size(hw);
@ -1175,18 +1224,17 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return 0; return 0;
} }
/** /**
* ice_nvm_validate_checksum * ice_nvm_validate_checksum
* @hw: pointer to the HW struct * @hw: pointer to the HW struct
* *
* Verify NVM PFA checksum validity (0x0706) * Verify NVM PFA checksum validity (0x0706)
*/ */
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) int ice_nvm_validate_checksum(struct ice_hw *hw)
{ {
struct ice_aqc_nvm_checksum *cmd; struct ice_aqc_nvm_checksum *cmd;
struct ice_aq_desc desc; struct ice_aq_desc desc;
enum ice_status status; int status;
status = ice_acquire_nvm(hw, ICE_RES_READ); status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status) if (status)
@ -1203,7 +1251,7 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
if (!status) if (!status)
if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
status = ICE_ERR_NVM_CHECKSUM; status = -EIO;
return status; return status;
} }
@ -1214,11 +1262,11 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
* *
* Recalculate NVM PFA checksum (0x0706) * Recalculate NVM PFA checksum (0x0706)
*/ */
enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw) int ice_nvm_recalculate_checksum(struct ice_hw *hw)
{ {
struct ice_aqc_nvm_checksum *cmd; struct ice_aqc_nvm_checksum *cmd;
struct ice_aq_desc desc; struct ice_aq_desc desc;
enum ice_status status; int status;
status = ice_acquire_nvm(hw, ICE_RES_READ); status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status) if (status)
@ -1239,22 +1287,41 @@ enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw)
/** /**
* ice_nvm_write_activate * ice_nvm_write_activate
* @hw: pointer to the HW struct * @hw: pointer to the HW struct
* @cmd_flags: NVM activate admin command bits (banks to be validated) * @cmd_flags: flags for write activate command
* @response_flags: response indicators from firmware
* *
* Update the control word with the required banks' validity bits * Update the control word with the required banks' validity bits
* and dumps the Shadow RAM to flash (0x0707) * and dumps the Shadow RAM to flash (0x0707)
*
* cmd_flags controls which banks to activate, the preservation level to use
* when activating the NVM bank, and whether an EMP reset is required for
* activation.
*
 * Note that the 16-bit cmd_flags value is split between two separate
 * 1-byte flag values in the descriptor.
*
* On successful return of the firmware command, the response_flags variable
* is updated with the flags reported by firmware indicating certain status,
* such as whether EMP reset is enabled.
*/ */
enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) int
ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{ {
struct ice_aqc_nvm *cmd; struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc; struct ice_aq_desc desc;
int status;
cmd = &desc.params.nvm; cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
cmd->cmd_flags = cmd_flags; cmd->cmd_flags = ICE_LO_BYTE(cmd_flags);
cmd->offset_high = ICE_HI_BYTE(cmd_flags);
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!status && response_flags)
*response_flags = cmd->cmd_flags;
return status;
} }
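
Because the 16-bit cmd_flags value straddles two descriptor bytes, callers pass a single u16 and the helper splits it with ICE_LO_BYTE()/ICE_HI_BYTE(). A hedged caller sketch; flag value 0 requests a plain activation, and the response capture is optional:

u8 resp = 0;
int err = ice_nvm_write_activate(hw, 0, &resp);

if (!err)
	ice_debug(hw, ICE_DBG_NVM, "write activate response flags: 0x%02x\n",
		  resp);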
/** /**
@ -1265,11 +1332,11 @@ enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
* Read the Minimum Security Revision TLV and extract the revision values from * Read the Minimum Security Revision TLV and extract the revision values from
* the flash image into a readable structure for processing. * the flash image into a readable structure for processing.
*/ */
enum ice_status int
ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs) ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
{ {
struct ice_aqc_nvm_minsrev data; struct ice_aqc_nvm_minsrev data;
enum ice_status status; int status;
u16 valid; u16 valid;
status = ice_acquire_nvm(hw, ICE_RES_READ); status = ice_acquire_nvm(hw, ICE_RES_READ);
@ -1321,15 +1388,15 @@ ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
* fields to determine what update is being requested. If the valid bit is not * fields to determine what update is being requested. If the valid bit is not
* set for that module, then the associated minsrev will be left as is. * set for that module, then the associated minsrev will be left as is.
*/ */
enum ice_status int
ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs) ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
{ {
struct ice_aqc_nvm_minsrev data; struct ice_aqc_nvm_minsrev data;
enum ice_status status; int status;
if (!minsrevs->nvm_valid && !minsrevs->orom_valid) { if (!minsrevs->nvm_valid && !minsrevs->orom_valid) {
ice_debug(hw, ICE_DBG_NVM, "At least one of NVM and OROM MinSrev must be valid"); ice_debug(hw, ICE_DBG_NVM, "At least one of NVM and OROM MinSrev must be valid");
return ICE_ERR_PARAM; return -EINVAL;
} }
status = ice_acquire_nvm(hw, ICE_RES_WRITE); status = ice_acquire_nvm(hw, ICE_RES_WRITE);
@ -1356,12 +1423,12 @@ ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
/* Update flash data */ /* Update flash data */
status = ice_aq_update_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data), &data, status = ice_aq_update_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data), &data,
true, ICE_AQC_NVM_SPECIAL_UPDATE, NULL); false, ICE_AQC_NVM_SPECIAL_UPDATE, NULL);
if (status) if (status)
goto exit_release_res; goto exit_release_res;
/* Dump the Shadow RAM to the flash */ /* Dump the Shadow RAM to the flash */
status = ice_nvm_write_activate(hw, 0); status = ice_nvm_write_activate(hw, 0, NULL);
exit_release_res: exit_release_res:
ice_release_nvm(hw); ice_release_nvm(hw);
@ -1377,7 +1444,7 @@ exit_release_res:
* Fill in the data section of the NVM access request with a copy of the NVM * Fill in the data section of the NVM access request with a copy of the NVM
* features structure. * features structure.
*/ */
static enum ice_status static int
ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd, ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data) union ice_nvm_access_data *data)
{ {
@ -1387,7 +1454,7 @@ ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
* work on older drivers. * work on older drivers.
*/ */
if (cmd->data_size < sizeof(struct ice_nvm_features)) if (cmd->data_size < sizeof(struct ice_nvm_features))
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Initialize the data buffer to zeros */ /* Initialize the data buffer to zeros */
memset(data, 0, cmd->data_size); memset(data, 0, cmd->data_size);
@ -1443,7 +1510,7 @@ static u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd)
* register offset. First validates that the module and flags are correct, and * register offset. First validates that the module and flags are correct, and
* then ensures that the register offset is one of the accepted registers. * then ensures that the register offset is one of the accepted registers.
*/ */
static enum ice_status static int
ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd) ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
{ {
u32 module, flags, offset; u32 module, flags, offset;
@ -1457,7 +1524,7 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
if (module != ICE_NVM_REG_RW_MODULE || if (module != ICE_NVM_REG_RW_MODULE ||
flags != ICE_NVM_REG_RW_FLAGS || flags != ICE_NVM_REG_RW_FLAGS ||
cmd->data_size != sizeof_field(union ice_nvm_access_data, regval)) cmd->data_size != sizeof_field(union ice_nvm_access_data, regval))
return ICE_ERR_PARAM; return -EINVAL;
switch (offset) { switch (offset) {
case GL_HICR: case GL_HICR:
@ -1467,6 +1534,7 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
case GLGEN_CSR_DEBUG_C: case GLGEN_CSR_DEBUG_C:
case GLGEN_RSTAT: case GLGEN_RSTAT:
case GLPCI_LBARCTRL: case GLPCI_LBARCTRL:
case GL_MNG_DEF_DEVID:
case GLNVM_GENS: case GLNVM_GENS:
case GLNVM_FLA: case GLNVM_FLA:
case PF_FUNC_RID: case PF_FUNC_RID:
@ -1475,16 +1543,16 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
break; break;
} }
for (i = 0; i <= ICE_NVM_ACCESS_GL_HIDA_MAX; i++) for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
if (offset == (u32)GL_HIDA(i)) if (offset == (u32)GL_HIDA(i))
return 0; return 0;
for (i = 0; i <= ICE_NVM_ACCESS_GL_HIBA_MAX; i++) for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
if (offset == (u32)GL_HIBA(i)) if (offset == (u32)GL_HIBA(i))
return 0; return 0;
/* All other register offsets are not valid */ /* All other register offsets are not valid */
return ICE_ERR_OUT_OF_RANGE; return -EIO;
} }
/** /**
@ -1495,11 +1563,11 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
* *
* Process an NVM access request to read a register. * Process an NVM access request to read a register.
*/ */
static enum ice_status static int
ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data) union ice_nvm_access_data *data)
{ {
enum ice_status status; int status;
/* Always initialize the output data, even on failure */ /* Always initialize the output data, even on failure */
memset(data, 0, cmd->data_size); memset(data, 0, cmd->data_size);
@ -1526,11 +1594,11 @@ ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
* *
* Process an NVM access request to write a register. * Process an NVM access request to write a register.
*/ */
static enum ice_status static int
ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data) union ice_nvm_access_data *data)
{ {
enum ice_status status; int status;
/* Make sure this is a valid read/write access request */ /* Make sure this is a valid read/write access request */
status = ice_validate_nvm_rw_reg(cmd); status = ice_validate_nvm_rw_reg(cmd);
@ -1541,7 +1609,7 @@ ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
switch (cmd->offset) { switch (cmd->offset) {
case GL_HICR_EN: case GL_HICR_EN:
case GLGEN_RSTAT: case GLGEN_RSTAT:
return ICE_ERR_OUT_OF_RANGE; return -EIO;
default: default:
break; break;
} }
@ -1568,7 +1636,7 @@ ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
* For valid commands, perform the necessary function, copying the data into * For valid commands, perform the necessary function, copying the data into
* the provided data buffer. * the provided data buffer.
*/ */
enum ice_status int
ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data) union ice_nvm_access_data *data)
{ {
@ -1576,12 +1644,12 @@ ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
/* Extended flags are currently reserved and must be zero */ /* Extended flags are currently reserved and must be zero */
if ((cmd->config & ICE_NVM_CFG_EXT_FLAGS_M) != 0) if ((cmd->config & ICE_NVM_CFG_EXT_FLAGS_M) != 0)
return ICE_ERR_PARAM; return -EINVAL;
/* Adapter info must match the HW device ID */ /* Adapter info must match the HW device ID */
adapter_info = ice_nvm_access_get_adapter(cmd); adapter_info = ice_nvm_access_get_adapter(cmd);
if (adapter_info != hw->device_id) if (adapter_info != hw->device_id)
return ICE_ERR_PARAM; return -EINVAL;
switch (cmd->command) { switch (cmd->command) {
case ICE_NVM_CMD_READ: case ICE_NVM_CMD_READ:
@ -1601,7 +1669,7 @@ ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
case ICE_NVM_CMD_WRITE: case ICE_NVM_CMD_WRITE:
return ice_nvm_access_write(hw, cmd, data); return ice_nvm_access_write(hw, cmd, data);
default: default:
return ICE_ERR_PARAM; return -EINVAL;
} }
} }
@ -1612,7 +1680,7 @@ ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
* Update empr (0x0709). This command allows SW to * Update empr (0x0709). This command allows SW to
* request an EMPR to activate new FW. * request an EMPR to activate new FW.
*/ */
enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw) int ice_aq_nvm_update_empr(struct ice_hw *hw)
{ {
struct ice_aq_desc desc; struct ice_aq_desc desc;
@ -1635,7 +1703,7 @@ enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
* as part of the NVM update as the first cmd in the flow. * as part of the NVM update as the first cmd in the flow.
*/ */
enum ice_status int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd) u16 length, struct ice_sq_cd *cd)
{ {
@ -1643,7 +1711,7 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
struct ice_aq_desc desc; struct ice_aq_desc desc;
if (length != 0 && !data) if (length != 0 && !data)
return ICE_ERR_PARAM; return -EINVAL;
cmd = &desc.params.pkg_data; cmd = &desc.params.pkg_data;
@ -1672,17 +1740,17 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
* the TransferFlag is set to End or StartAndEnd. * the TransferFlag is set to End or StartAndEnd.
*/ */
enum ice_status int
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 transfer_flag, u8 *comp_response, u8 transfer_flag, u8 *comp_response,
u8 *comp_response_code, struct ice_sq_cd *cd) u8 *comp_response_code, struct ice_sq_cd *cd)
{ {
struct ice_aqc_nvm_pass_comp_tbl *cmd; struct ice_aqc_nvm_pass_comp_tbl *cmd;
struct ice_aq_desc desc; struct ice_aq_desc desc;
enum ice_status status; int status;
if (!data || !comp_response || !comp_response_code) if (!data || !comp_response || !comp_response_code)
return ICE_ERR_PARAM; return -EINVAL;
cmd = &desc.params.pass_comp_tbl; cmd = &desc.params.pass_comp_tbl;


@ -63,59 +63,51 @@ union ice_nvm_access_data {
struct ice_nvm_features drv_features; /* NVM features */ struct ice_nvm_features drv_features; /* NVM features */
}; };
/* NVM Access registers */ int
#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
#define GL_HICR 0x00082040
#define GL_HICR_EN 0x00082044
#define GLGEN_CSR_DEBUG_C 0x00075750
#define GLPCI_LBARCTRL 0x0009DE74
#define GLNVM_GENS 0x000B6100
#define GLNVM_FLA 0x000B6108
#define ICE_NVM_ACCESS_GL_HIDA_MAX 15
#define ICE_NVM_ACCESS_GL_HIBA_MAX 1023
enum ice_status
ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data); union ice_nvm_access_data *data);
enum ice_status int
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw); void ice_release_nvm(struct ice_hw *hw);
enum ice_status int
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd);
int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram); bool read_shadow_ram);
enum ice_status int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type); u16 module_type);
enum ice_status int
ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs); ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs);
enum ice_status int
ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs); ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs);
enum ice_status int
ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom); ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
enum ice_status int
ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm); ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
enum ice_status int
ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist); ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist);
enum ice_status int
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
enum ice_status ice_init_nvm(struct ice_hw *hw); int ice_init_nvm(struct ice_hw *hw);
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
enum ice_status int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd); ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
enum ice_status int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags, u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd); struct ice_sq_cd *cd);
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); int ice_nvm_validate_checksum(struct ice_hw *hw);
enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw); int ice_nvm_recalculate_checksum(struct ice_hw *hw);
enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags); int
enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw); ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags);
enum ice_status int ice_aq_nvm_update_empr(struct ice_hw *hw);
int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd); u16 length, struct ice_sq_cd *cd);
enum ice_status int
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 transfer_flag, u8 *comp_response, u8 transfer_flag, u8 *comp_response,
u8 *comp_response_code, struct ice_sq_cd *cd); u8 *comp_response_code, struct ice_sq_cd *cd);


@ -5,9 +5,14 @@
#define _ICE_OSDEP_H_ #define _ICE_OSDEP_H_
#include <linux/types.h> #include <linux/types.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h> #include <linux/if_ether.h>
#include <linux/pci_ids.h>
#include "kcompat.h" #include "kcompat.h"
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
@ -25,8 +30,8 @@ struct ice_dma_mem {
size_t size; size_t size;
}; };
#define ice_hw_to_dev(ptr) \ struct ice_hw;
(&(container_of((ptr), struct ice_pf, hw))->pdev->dev) struct device *ice_hw_to_dev(struct ice_hw *hw);
#define ice_info_fwlog(hw, rowsize, groupsize, buf, len) \ #define ice_info_fwlog(hw, rowsize, groupsize, buf, len) \
print_hex_dump(KERN_INFO, " FWLOG: ", \ print_hex_dump(KERN_INFO, " FWLOG: ", \
@ -34,12 +39,57 @@ struct ice_dma_mem {
rowsize, groupsize, buf, \ rowsize, groupsize, buf, \
len, false) len, false)
#ifdef CONFIG_SYMBOLIC_ERRNAME
/**
* ice_print_errno - logs message with appended error
* @func: logging function (such as dev_err, netdev_warn, etc.)
* @obj: first argument that func takes
* @code: standard error code (negative integer)
* @fmt: format string (without a trailing "\n")
*
* Uses the kernel logging function of your choice to log the provided
* message with the error code and, if the kernel allows it, its symbolic
* representation appended. Additional format arguments can be added at
* the end.
* Supports only functions that take one additional argument before the
* format string.
*/
#define ice_print_errno(func, obj, code, fmt, args...) ({ \
long code_ = (code); \
BUILD_BUG_ON(fmt[strlen(fmt) - 1] == '\n'); \
func(obj, fmt ", error: %ld (%pe)\n", \
##args, code_, ERR_PTR(code_)); \
})
/**
* ice_err_arg - replaces error code as a logging function argument
* @err: standard error code (negative integer)
*/
#define ice_err_arg(err) ERR_PTR(err)
/**
* ice_err_format - replaces %(l)d format corresponding to an error code
*/
#define ice_err_format() "%pe"
#else
#define ice_print_errno(func, obj, code, fmt, args...) ({ \
BUILD_BUG_ON(fmt[strlen(fmt) - 1] == '\n'); \
func(obj, fmt ", error: %ld\n", ##args, (long)code); \
})
#define ice_err_arg(err) ((long)err)
#define ice_err_format() "%ld"
#endif /* CONFIG_SYMBOLIC_ERRNAME */
#define ice_dev_err_errno(dev, code, fmt, args...) \
ice_print_errno(dev_err, dev, code, fmt, ##args)
#define ice_dev_warn_errno(dev, code, fmt, args...) \
ice_print_errno(dev_warn, dev, code, fmt, ##args)
#define ice_dev_info_errno(dev, code, fmt, args...) \
ice_print_errno(dev_info, dev, code, fmt, ##args)
#define ice_dev_dbg_errno(dev, code, fmt, args...) \
ice_print_errno(dev_dbg, dev, code, fmt, ##args)
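A usage sketch for the wrappers above (hypothetical call site; the device pointer and queue index are illustrative):

static void example_log_failure(struct device *dev, int err)
{
	/* With CONFIG_SYMBOLIC_ERRNAME this expands to something like
	 * "queue 3 setup failed, error: -12 (-ENOMEM)"; without it, only
	 * the numeric code is printed. The format string must not end in
	 * '\n' -- the macro appends it (enforced by the BUILD_BUG_ON).
	 */
	ice_dev_err_errno(dev, err, "queue %d setup failed", 3);
}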
#ifdef CONFIG_DYNAMIC_DEBUG #ifdef CONFIG_DYNAMIC_DEBUG
#define ice_debug(hw, type, fmt, args...) \ #define ice_debug(hw, type, fmt, args...) \
dev_dbg(ice_hw_to_dev(hw), fmt, ##args) dev_dbg(ice_hw_to_dev(hw), fmt, ##args)
#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ #define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
print_hex_dump_debug(KBUILD_MODNAME " ", \ print_hex_dump_debug(KBUILD_MODNAME " ", \
DUMP_PREFIX_OFFSET, rowsize, \ DUMP_PREFIX_OFFSET, rowsize, \

drivers/thirdparty/ice/ice_parser.c (new file)

@ -0,0 +1,595 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice_common.h"
#include "ice_parser_util.h"
#define ICE_SEC_DATA_OFFSET 4
#define ICE_SID_RXPARSER_IMEM_ENTRY_SIZE 48
#define ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE 24
#define ICE_SID_RXPARSER_CAM_ENTRY_SIZE 16
#define ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE 17
#define ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE 12
#define ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE 13
#define ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE 88
#define ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE 24
#define ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE 8
#define ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE 24
#define ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE 1
#define ICE_SEC_LBL_DATA_OFFSET 2
#define ICE_SID_LBL_ENTRY_SIZE 66
void ice_lbl_dump(struct ice_hw *hw, struct ice_lbl_item *item)
{
dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
dev_info(ice_hw_to_dev(hw), "label = %s\n", item->label);
}
void ice_parse_item_dflt(struct ice_hw *hw, u16 idx, void *item,
void *data, int size)
{
memcpy(item, data, size);
}
/**
* ice_parser_sect_item_get - parse an item from a section
* @sect_type: section type
* @section: section object
* @index: index of the item to get
* @offset: dummy; present only to match the prototype of
*          ice_pkg_enum_entry's last parameter
*/
void *ice_parser_sect_item_get(u32 sect_type, void *section,
u32 index, u32 *offset)
{
struct ice_pkg_sect_hdr *hdr;
int data_off = ICE_SEC_DATA_OFFSET;
int size;
if (!section)
return NULL;
switch (sect_type) {
case ICE_SID_RXPARSER_IMEM:
size = ICE_SID_RXPARSER_IMEM_ENTRY_SIZE;
break;
case ICE_SID_RXPARSER_METADATA_INIT:
size = ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE;
break;
case ICE_SID_RXPARSER_CAM:
size = ICE_SID_RXPARSER_CAM_ENTRY_SIZE;
break;
case ICE_SID_RXPARSER_PG_SPILL:
size = ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE;
break;
case ICE_SID_RXPARSER_NOMATCH_CAM:
size = ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE;
break;
case ICE_SID_RXPARSER_NOMATCH_SPILL:
size = ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE;
break;
case ICE_SID_RXPARSER_BOOST_TCAM:
size = ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE;
break;
case ICE_SID_LBL_RXPARSER_TMEM:
data_off = ICE_SEC_LBL_DATA_OFFSET;
size = ICE_SID_LBL_ENTRY_SIZE;
break;
case ICE_SID_RXPARSER_MARKER_PTYPE:
size = ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE;
break;
case ICE_SID_RXPARSER_MARKER_GRP:
size = ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE;
break;
case ICE_SID_RXPARSER_PROTO_GRP:
size = ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE;
break;
case ICE_SID_RXPARSER_FLAG_REDIR:
size = ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE;
break;
default:
return NULL;
}
hdr = (struct ice_pkg_sect_hdr *)section;
if (index >= le16_to_cpu(hdr->count))
return NULL;
return (void *)((u64)section + data_off + index * size);
}
/**
* ice_parser_create_table - create an item table from a section
* @hw: pointer to the hardware structure
* @sect_type: section type
* @item_size: item size in bytes
* @length: number of items in the table to create
* @item_get: the entry-fetch function passed through to ice_pkg_enum_entry
* @parse_item: the function to parse the item
* @no_offset: ignore the header offset and count indexes from 0
*/
void *ice_parser_create_table(struct ice_hw *hw, u32 sect_type,
u32 item_size, u32 length,
void *(*item_get)(u32 sect_type, void *section,
u32 index, u32 *offset),
void (*parse_item)(struct ice_hw *hw, u16 idx,
void *item, void *data,
int size),
bool no_offset)
{
struct ice_seg *seg = hw->seg;
struct ice_pkg_enum state;
u16 idx = 0xffff;
void *table;
void *data;
if (!seg)
return NULL;
table = devm_kcalloc(ice_hw_to_dev(hw), length, item_size, GFP_KERNEL);
if (!table) {
ice_debug(hw, ICE_DBG_PARSER, "failed to allocate memory for table type %d.\n",
sect_type);
return NULL;
}
memset(&state, 0, sizeof(state));
do {
data = ice_pkg_enum_entry(seg, &state, sect_type, NULL,
item_get);
seg = NULL;
if (data) {
struct ice_pkg_sect_hdr *hdr =
(struct ice_pkg_sect_hdr *)state.sect;
if (no_offset)
idx++;
else
idx = le16_to_cpu(hdr->offset) +
state.entry_idx;
parse_item(hw, idx,
(void *)((u64)table + idx * item_size),
data, item_size);
}
} while (data);
return table;
}
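A sketch of how the per-section loaders used by ice_parser_create() below are expected to wrap this helper (hypothetical; ICE_IMEM_TABLE_SIZE is an assumed entry count, and a real loader may decode fields rather than use the plain-memcpy ice_parse_item_dflt):

static struct ice_imem_item *example_imem_table_get(struct ice_hw *hw)
{
	/* enumerate every ICE_SID_RXPARSER_IMEM entry in the DDP segment
	 * into a flat, devm-allocated array
	 */
	return (struct ice_imem_item *)
		ice_parser_create_table(hw, ICE_SID_RXPARSER_IMEM,
					sizeof(struct ice_imem_item),
					ICE_IMEM_TABLE_SIZE,
					ice_parser_sect_item_get,
					ice_parse_item_dflt, false);
}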
/**
* ice_parser_create - create a parser instance
* @hw: pointer to the hardware structure
* @psr: output parameter for the newly created parser instance
*/
int ice_parser_create(struct ice_hw *hw, struct ice_parser **psr)
{
struct ice_parser *p;
int status;
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(struct ice_parser),
GFP_KERNEL);
if (!p)
return -ENOMEM;
p->hw = hw;
p->rt.psr = p;
p->imem_table = ice_imem_table_get(hw);
if (!p->imem_table) {
status = -EINVAL;
goto err;
}
p->mi_table = ice_metainit_table_get(hw);
if (!p->mi_table) {
status = -EINVAL;
goto err;
}
p->pg_cam_table = ice_pg_cam_table_get(hw);
if (!p->pg_cam_table) {
status = -EINVAL;
goto err;
}
p->pg_sp_cam_table = ice_pg_sp_cam_table_get(hw);
if (!p->pg_sp_cam_table) {
status = -EINVAL;
goto err;
}
p->pg_nm_cam_table = ice_pg_nm_cam_table_get(hw);
if (!p->pg_nm_cam_table) {
status = -EINVAL;
goto err;
}
p->pg_nm_sp_cam_table = ice_pg_nm_sp_cam_table_get(hw);
if (!p->pg_nm_sp_cam_table) {
status = -EINVAL;
goto err;
}
p->bst_tcam_table = ice_bst_tcam_table_get(hw);
if (!p->bst_tcam_table) {
status = -EINVAL;
goto err;
}
p->bst_lbl_table = ice_bst_lbl_table_get(hw);
if (!p->bst_lbl_table) {
status = -EINVAL;
goto err;
}
p->ptype_mk_tcam_table = ice_ptype_mk_tcam_table_get(hw);
if (!p->ptype_mk_tcam_table) {
status = -EINVAL;
goto err;
}
p->mk_grp_table = ice_mk_grp_table_get(hw);
if (!p->mk_grp_table) {
status = -EINVAL;
goto err;
}
p->proto_grp_table = ice_proto_grp_table_get(hw);
if (!p->proto_grp_table) {
status = -EINVAL;
goto err;
}
p->flg_rd_table = ice_flg_rd_table_get(hw);
if (!p->flg_rd_table) {
status = -EINVAL;
goto err;
}
p->xlt_kb_sw = ice_xlt_kb_get_sw(hw);
if (!p->xlt_kb_sw) {
status = -EINVAL;
goto err;
}
p->xlt_kb_acl = ice_xlt_kb_get_acl(hw);
if (!p->xlt_kb_acl) {
status = -EINVAL;
goto err;
}
p->xlt_kb_fd = ice_xlt_kb_get_fd(hw);
if (!p->xlt_kb_fd) {
status = -EINVAL;
goto err;
}
p->xlt_kb_rss = ice_xlt_kb_get_rss(hw);
if (!p->xlt_kb_rss) {
status = -EINVAL;
goto err;
}
*psr = p;
return 0;
err:
ice_parser_destroy(p);
return status;
}
/**
* ice_parser_destroy - destroy a parser instance
* @psr: pointer to a parser instance
*/
void ice_parser_destroy(struct ice_parser *psr)
{
devm_kfree(ice_hw_to_dev(psr->hw), psr->imem_table);
devm_kfree(ice_hw_to_dev(psr->hw), psr->mi_table);
devm_kfree(ice_hw_to_dev(psr->hw), psr->pg_cam_table);
devm_kfree(ice_hw_to_dev(psr->hw), psr->pg_sp_cam_table);
devm_kfree(ice_hw_to_dev(psr->hw), psr->pg_nm_cam_table);
devm_kfree(ice_hw_to_dev(psr->hw), psr->pg_nm_sp_cam_table);
devm_kfree(ice_hw_to_dev(psr->hw), psr->bst_tcam_table);
devm_kfree(ice_hw_to_dev(psr->hw), psr->bst_lbl_table);
devm_kfree(ice_hw_to_dev(psr->hw), psr->ptype_mk_tcam_table);
devm_kfree(ice_hw_to_dev(psr->hw), psr->mk_grp_table);
devm_kfree(ice_hw_to_dev(psr->hw), psr->proto_grp_table);
devm_kfree(ice_hw_to_dev(psr->hw), psr->flg_rd_table);
devm_kfree(ice_hw_to_dev(psr->hw), psr->xlt_kb_sw);
devm_kfree(ice_hw_to_dev(psr->hw), psr->xlt_kb_acl);
devm_kfree(ice_hw_to_dev(psr->hw), psr->xlt_kb_fd);
devm_kfree(ice_hw_to_dev(psr->hw), psr->xlt_kb_rss);
devm_kfree(ice_hw_to_dev(psr->hw), psr);
}
/**
* ice_parser_run - parse a raw packet buffer and return the result
* @psr: pointer to a parser instance
* @pkt_buf: packet data
* @pkt_len: packet length
* @rslt: input/output parameter to save parser result.
*/
int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
int pkt_len, struct ice_parser_result *rslt)
{
ice_parser_rt_reset(&psr->rt);
ice_parser_rt_pktbuf_set(&psr->rt, pkt_buf, pkt_len);
return ice_parser_rt_execute(&psr->rt, rslt);
}
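An end-to-end usage sketch tying the lifecycle together (hypothetical caller; error handling abbreviated):

static int example_parse_one(struct ice_hw *hw, const u8 *frame, int len)
{
	struct ice_parser_result rslt;
	struct ice_parser *psr;
	int err;

	err = ice_parser_create(hw, &psr);
	if (err)
		return err;

	/* software-simulate the RX parser on one raw frame */
	err = ice_parser_run(psr, frame, len, &rslt);
	if (!err)
		ice_parser_result_dump(hw, &rslt);

	ice_parser_destroy(psr);
	return err;
}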
/**
* ice_parser_result_dump - dump parser result info
* @hw: pointer to the hardware structure
* @rslt: parser result info to dump
*/
void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt)
{
int i;
dev_info(ice_hw_to_dev(hw), "ptype = %d\n", rslt->ptype);
for (i = 0; i < rslt->po_num; i++)
dev_info(ice_hw_to_dev(hw), "proto = %d, offset = %d\n",
rslt->po[i].proto_id, rslt->po[i].offset);
dev_info(ice_hw_to_dev(hw), "flags_psr = 0x%016llx\n",
(unsigned long long)rslt->flags_psr);
dev_info(ice_hw_to_dev(hw), "flags_pkt = 0x%016llx\n",
(unsigned long long)rslt->flags_pkt);
dev_info(ice_hw_to_dev(hw), "flags_sw = 0x%04x\n", rslt->flags_sw);
dev_info(ice_hw_to_dev(hw), "flags_fd = 0x%04x\n", rslt->flags_fd);
dev_info(ice_hw_to_dev(hw), "flags_rss = 0x%04x\n", rslt->flags_rss);
}
static void _bst_vm_set(struct ice_parser *psr, const char *prefix, bool on)
{
u16 i = 0;
while (true) {
struct ice_bst_tcam_item *item;
item = ice_bst_tcam_search(psr->bst_tcam_table,
psr->bst_lbl_table,
prefix, &i);
if (!item)
break;
item->key[0] = (u8)(on ? 0xff : 0xfe);
item->key_inv[0] = (u8)(on ? 0xff : 0xfe);
i++;
}
}
/**
* ice_parser_dvm_set - configure double vlan mode for parser
* @psr: pointer to a parser instance
* @on: true to turn on; false to turn off
*/
void ice_parser_dvm_set(struct ice_parser *psr, bool on)
{
_bst_vm_set(psr, "BOOST_MAC_VLAN_DVM", on);
_bst_vm_set(psr, "BOOST_MAC_VLAN_SVM", !on);
}
static int
_tunnel_port_set(struct ice_parser *psr, const char *prefix, u16 udp_port,
bool on)
{
u8 *buf = (u8 *)&udp_port;
u16 i = 0;
while (true) {
struct ice_bst_tcam_item *item;
item = ice_bst_tcam_search(psr->bst_tcam_table,
psr->bst_lbl_table,
prefix, &i);
if (!item)
break;
/* found empty slot to add */
if (on && item->key[16] == 0xfe && item->key_inv[16] == 0xfe) {
item->key_inv[15] = buf[0];
item->key_inv[16] = buf[1];
item->key[15] = (u8)(0xff - buf[0]);
item->key[16] = (u8)(0xff - buf[1]);
return 0;
/* found a matched slot to delete */
} else if (!on && (item->key_inv[15] == buf[0] ||
item->key_inv[16] == buf[1])) {
item->key_inv[15] = 0xff;
item->key_inv[16] = 0xfe;
item->key[15] = 0xff;
item->key[16] = 0xfe;
return 0;
}
i++;
}
return -EINVAL;
}
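A worked example of the slot encoding above, assuming a little-endian host (the driver is x86-only per Kconfig) and VXLAN's default port 4789 (0x12b5):

/*
 *   buf[0] = 0xb5, buf[1] = 0x12
 *   key_inv[15] = 0xb5   key[15] = 0xff - 0xb5 = 0x4a
 *   key_inv[16] = 0x12   key[16] = 0xff - 0x12 = 0xed
 *
 * An empty slot is marked by key[16] == key_inv[16] == 0xfe, which is
 * exactly the state the delete path restores.
 */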
/**
* ice_parser_vxlan_tunnel_set - configure vxlan tunnel for parser
* @psr: pointer to a parser instance
* @udp_port: vxlan tunnel port in UDP header
* @on: true to turn on; false to turn off
*/
int ice_parser_vxlan_tunnel_set(struct ice_parser *psr,
u16 udp_port, bool on)
{
return _tunnel_port_set(psr, "TNL_VXLAN", udp_port, on);
}
/**
* ice_parser_geneve_tunnel_set - configure geneve tunnel for parser
* @psr: pointer to a parser instance
* @udp_port: geneve tunnel port in UDP header
* @on: true to turn on; false to turn off
*/
int ice_parser_geneve_tunnel_set(struct ice_parser *psr,
u16 udp_port, bool on)
{
return _tunnel_port_set(psr, "TNL_GENEVE", udp_port, on);
}
/**
* ice_parser_ecpri_tunnel_set - configure ecpri tunnel for parser
* @psr: pointer to a parser instance
* @udp_port: ecpri tunnel port in UDP header
* @on: true to turn on; false to turn off
*/
int ice_parser_ecpri_tunnel_set(struct ice_parser *psr,
u16 udp_port, bool on)
{
return _tunnel_port_set(psr, "TNL_UDP_ECPRI", udp_port, on);
}
static bool _nearest_proto_id(struct ice_parser_result *rslt, u16 offset,
u8 *proto_id, u16 *proto_off)
{
u16 dist = 0xffff;
u8 p = 0;
int i;
for (i = 0; i < rslt->po_num; i++) {
if (offset < rslt->po[i].offset)
continue;
if (offset - rslt->po[i].offset < dist) {
p = rslt->po[i].proto_id;
dist = offset - rslt->po[i].offset;
}
}
if (dist % 2)
return false;
*proto_id = p;
*proto_off = dist;
return true;
}
/* Default flag masks to cover GTP_EH_PDU, GTP_EH_PDU_LINK and TUN2.
 * In the future, the flag masks should be learned from the DDP package.
*/
#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW 0x4002
#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL 0x0000
#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD 0x6080
#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS 0x6010
/**
* ice_parser_profile_init - initialize an FXP profile based on a parser result
* @rslt: an instance of a parser result
* @pkt_buf: packet data buffer
* @msk_buf: packet mask buffer
* @buf_len: packet length
* @blk: FXP pipeline stage
* @prefix_match: match protocol stack exactly or only prefix
* @prof: input/output parameter to save the profile
*/
int ice_parser_profile_init(struct ice_parser_result *rslt,
const u8 *pkt_buf, const u8 *msk_buf,
int buf_len, enum ice_block blk,
bool prefix_match,
struct ice_parser_profile *prof)
{
u8 proto_id = 0xff;
u16 proto_off = 0;
u16 off;
memset(prof, 0, sizeof(*prof));
set_bit(rslt->ptype, prof->ptypes);
if (blk == ICE_BLK_SW) {
prof->flags = rslt->flags_sw;
prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW;
} else if (blk == ICE_BLK_ACL) {
prof->flags = rslt->flags_acl;
prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL;
} else if (blk == ICE_BLK_FD) {
prof->flags = rslt->flags_fd;
prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD;
} else if (blk == ICE_BLK_RSS) {
prof->flags = rslt->flags_rss;
prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS;
} else {
return -EINVAL;
}
for (off = 0; off < buf_len - 1; off++) {
if (msk_buf[off] == 0 && msk_buf[off + 1] == 0)
continue;
if (!_nearest_proto_id(rslt, off, &proto_id, &proto_off))
continue;
if (prof->fv_num >= 32)
return -EINVAL;
prof->fv[prof->fv_num].proto_id = proto_id;
prof->fv[prof->fv_num].offset = proto_off;
prof->fv[prof->fv_num].spec = *(const u16 *)&pkt_buf[off];
prof->fv[prof->fv_num].msk = *(const u16 *)&msk_buf[off];
prof->fv_num++;
}
return 0;
}
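A usage sketch building an RSS-stage profile from a parse result (hypothetical; pkt/msk are a caller-provided packet and a byte mask selecting the fields of interest):

static int example_build_rss_profile(struct ice_hw *hw,
				     struct ice_parser *psr,
				     const u8 *pkt, const u8 *msk, int len)
{
	struct ice_parser_profile prof;
	struct ice_parser_result rslt;
	int err;

	err = ice_parser_run(psr, pkt, len, &rslt);
	if (err)
		return err;

	err = ice_parser_profile_init(&rslt, pkt, msk, len, ICE_BLK_RSS,
				      false, &prof);
	if (!err)
		ice_parser_profile_dump(hw, &prof);
	return err;
}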
/**
* ice_parser_profile_dump - dump an FXP profile info
* @hw: pointer to the hardware structure
* @prof: profile info to dump
*/
void ice_parser_profile_dump(struct ice_hw *hw, struct ice_parser_profile *prof)
{
u16 i;
dev_info(ice_hw_to_dev(hw), "ptypes:\n");
for (i = 0; i < ICE_FLOW_PTYPE_MAX; i++)
if (test_bit(i, prof->ptypes))
dev_info(ice_hw_to_dev(hw), "\t%d\n", i);
for (i = 0; i < prof->fv_num; i++)
dev_info(ice_hw_to_dev(hw),
"proto = %d, offset = %d spec = 0x%04x, mask = 0x%04x\n",
prof->fv[i].proto_id, prof->fv[i].offset,
prof->fv[i].spec, prof->fv[i].msk);
dev_info(ice_hw_to_dev(hw), "flags = 0x%04x\n", prof->flags);
dev_info(ice_hw_to_dev(hw), "flags_msk = 0x%04x\n", prof->flags_msk);
}
/**
* ice_check_ddp_support_proto_id - check whether the DDP package supports a protocol ID
* @hw: pointer to the HW struct
* @proto_id: protocol ID value
*
* This function maintains compatibility across DDP packages by checking
* whether the currently loaded DDP file supports the required protocol ID.
*/
bool ice_check_ddp_support_proto_id(struct ice_hw *hw,
enum ice_prot_id proto_id)
{
struct ice_proto_grp_item *proto_grp_table;
struct ice_proto_grp_item *proto_grp;
bool exist = false;
u16 idx, i;
proto_grp_table = ice_proto_grp_table_get(hw);
if (!proto_grp_table)
return false;
for (idx = 0; idx < ICE_PROTO_GRP_TABLE_SIZE; idx++) {
proto_grp = &proto_grp_table[idx];
for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) {
if (proto_grp->po[i].proto_id == proto_id) {
exist = true;
goto exit;
}
}
}
exit:
devm_kfree(ice_hw_to_dev(hw), proto_grp_table);
return exist;
}
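A guard sketch for callers (hypothetical; ICE_PROT_UDP_OF is assumed from ice_protocol_type.h):

/* skip building a rule when the loaded DDP has no knowledge of the
 * protocol, e.g. before programming a UDP-tunnel match
 */
if (!ice_check_ddp_support_proto_id(hw, ICE_PROT_UDP_OF))
	return -EOPNOTSUPP;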

drivers/thirdparty/ice/ice_parser.h (new file)

@ -0,0 +1,114 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_PARSER_H_
#define _ICE_PARSER_H_
#include "ice_metainit.h"
#include "ice_imem.h"
#include "ice_pg_cam.h"
#include "ice_bst_tcam.h"
#include "ice_ptype_mk.h"
#include "ice_mk_grp.h"
#include "ice_proto_grp.h"
#include "ice_flg_rd.h"
#include "ice_xlt_kb.h"
#include "ice_parser_rt.h"
#include "ice_tmatch.h"
struct ice_parser {
struct ice_hw *hw; /* pointer to the hardware structure */
/* load data from section ICE_SID_RX_PARSER_IMEM */
struct ice_imem_item *imem_table;
/* load data from section ICE_SID_RXPARSER_METADATA_INIT */
struct ice_metainit_item *mi_table;
/* load data from section ICE_SID_RXPARSER_CAM */
struct ice_pg_cam_item *pg_cam_table;
/* load data from section ICE_SID_RXPARSER_PG_SPILL */
struct ice_pg_cam_item *pg_sp_cam_table;
/* load data from section ICE_SID_RXPARSER_NOMATCH_CAM */
struct ice_pg_nm_cam_item *pg_nm_cam_table;
/* load data from section ICE_SID_RXPARSER_NOMATCH_SPILL */
struct ice_pg_nm_cam_item *pg_nm_sp_cam_table;
/* load data from section ICE_SID_RXPARSER_BOOST_TCAM */
struct ice_bst_tcam_item *bst_tcam_table;
/* load data from section ICE_SID_LBL_RXPARSER_TMEM */
struct ice_lbl_item *bst_lbl_table;
/* load data from section ICE_SID_RXPARSER_MARKER_PTYPE */
struct ice_ptype_mk_tcam_item *ptype_mk_tcam_table;
/* load data from section ICE_SID_RXPARSER_MARKER_GRP */
struct ice_mk_grp_item *mk_grp_table;
/* load data from section ICE_SID_RXPARSER_PROTO_GRP */
struct ice_proto_grp_item *proto_grp_table;
/* load data from section ICE_SID_RXPARSER_FLAG_REDIR */
struct ice_flg_rd_item *flg_rd_table;
/* load data from section ICE_SID_XLT_KEY_BUILDER_SW */
struct ice_xlt_kb *xlt_kb_sw;
/* load data from section ICE_SID_XLT_KEY_BUILDER_ACL */
struct ice_xlt_kb *xlt_kb_acl;
/* load data from section ICE_SID_XLT_KEY_BUILDER_FD */
struct ice_xlt_kb *xlt_kb_fd;
/* load data from section ICE_SID_XLT_KEY_BUILDER_RSS */
struct ice_xlt_kb *xlt_kb_rss;
struct ice_parser_rt rt; /* parser runtime */
};
int ice_parser_create(struct ice_hw *hw, struct ice_parser **psr);
void ice_parser_destroy(struct ice_parser *psr);
void ice_parser_dvm_set(struct ice_parser *psr, bool on);
int ice_parser_vxlan_tunnel_set(struct ice_parser *psr,
u16 udp_port, bool on);
int ice_parser_geneve_tunnel_set(struct ice_parser *psr,
u16 udp_port, bool on);
int ice_parser_ecpri_tunnel_set(struct ice_parser *psr,
u16 udp_port, bool on);
struct ice_parser_proto_off {
u8 proto_id; /* hardware protocol ID */
u16 offset; /* offset where the protocol header starts */
};
struct ice_parser_result {
u16 ptype; /* 16 bits hardware PTYPE */
/* protocol and header offset pairs */
struct ice_parser_proto_off po[16];
int po_num; /* number of pairs, must be <= 16 */
u64 flags_psr; /* 64 bits parser flags */
u64 flags_pkt; /* 64 bits packet flags */
u16 flags_sw; /* 16 bits key builder flag for SW */
u16 flags_acl; /* 16 bits key builder flag for ACL */
u16 flags_fd; /* 16 bits key builder flag for FD */
u16 flags_rss; /* 16 bits key builder flag for RSS */
};
int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
int pkt_len, struct ice_parser_result *rslt);
void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt);
struct ice_parser_fv {
u8 proto_id; /* hardware protocol ID */
u16 offset; /* offset from the start of the protocol header */
u16 spec; /* 16 bits pattern to match */
u16 msk; /* 16 bits pattern mask */
};
struct ice_parser_profile {
struct ice_parser_fv fv[48]; /* field vector array */
int fv_num; /* number of field vectors, must be <= 48 */
u16 flags; /* 16 bits key builder flag */
u16 flags_msk; /* key builder flag mask */
/* 1024 bits PTYPE bitmap */
DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
};
int ice_parser_profile_init(struct ice_parser_result *rslt,
const u8 *pkt_buf, const u8 *msk_buf,
int buf_len, enum ice_block blk,
bool prefix_match,
struct ice_parser_profile *prof);
void ice_parser_profile_dump(struct ice_hw *hw,
struct ice_parser_profile *prof);
bool ice_check_ddp_support_proto_id(struct ice_hw *hw,
enum ice_prot_id proto_id);
#endif /* _ICE_PARSER_H_ */

drivers/thirdparty/ice/ice_parser_rt.c (new file)

@ -0,0 +1,864 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice_common.h"
#define GPR_HB_IDX 64
#define GPR_ERR_IDX 84
#define GPR_FLG_IDX 104
#define GPR_TSR_IDX 108
#define GPR_NN_IDX 109
#define GPR_HO_IDX 110
#define GPR_NP_IDX 111
static void _rt_tsr_set(struct ice_parser_rt *rt, u16 tsr)
{
rt->gpr[GPR_TSR_IDX] = tsr;
}
static void _rt_ho_set(struct ice_parser_rt *rt, u16 ho)
{
rt->gpr[GPR_HO_IDX] = ho;
memcpy(&rt->gpr[GPR_HB_IDX], &rt->pkt_buf[ho], 32);
}
static void _rt_np_set(struct ice_parser_rt *rt, u16 pc)
{
rt->gpr[GPR_NP_IDX] = pc;
}
static void _rt_nn_set(struct ice_parser_rt *rt, u16 node)
{
rt->gpr[GPR_NN_IDX] = node;
}
static void _rt_flag_set(struct ice_parser_rt *rt, int idx, bool val)
{
int y = idx / 16;
int x = idx % 16;
if (val)
rt->gpr[GPR_FLG_IDX + y] |= (u16)(1 << x);
else
rt->gpr[GPR_FLG_IDX + y] &= ~(u16)(1 << x);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set parser flag %d value %d\n",
idx, val);
}
static void _rt_gpr_set(struct ice_parser_rt *rt, int idx, u16 val)
{
if (idx == GPR_HO_IDX)
_rt_ho_set(rt, val);
else
rt->gpr[idx] = val;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set GPR %d value %d\n",
idx, val);
}
static void _rt_err_set(struct ice_parser_rt *rt, int idx, bool val)
{
if (val)
rt->gpr[GPR_ERR_IDX] |= (u16)(1 << idx);
else
rt->gpr[GPR_ERR_IDX] &= ~(u16)(1 << idx);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set parser error %d value %d\n",
idx, val);
}
/**
* ice_parser_rt_reset - reset the parser runtime
* @rt: pointer to the parser runtime
*/
void ice_parser_rt_reset(struct ice_parser_rt *rt)
{
struct ice_parser *psr = rt->psr;
struct ice_metainit_item *mi = &psr->mi_table[0];
int i;
memset(rt, 0, sizeof(*rt));
_rt_tsr_set(rt, mi->tsr);
_rt_ho_set(rt, mi->ho);
_rt_np_set(rt, mi->pc);
_rt_nn_set(rt, mi->pg_rn);
rt->psr = psr;
for (i = 0; i < 64; i++) {
if ((mi->flags & (1ul << i)) != 0ul)
_rt_flag_set(rt, i, true);
}
}
/**
* ice_parser_rt_pktbuf_set - set a packet into parser runtime
* @rt: pointer to the parser runtime
* @pkt_buf: buffer with packet data
* @pkt_len: packet buffer length
*/
void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf,
int pkt_len)
{
int len = min(ICE_PARSER_MAX_PKT_LEN, pkt_len);
u16 ho = rt->gpr[GPR_HO_IDX];
memcpy(rt->pkt_buf, pkt_buf, len);
rt->pkt_len = pkt_len;
memcpy(&rt->gpr[GPR_HB_IDX], &rt->pkt_buf[ho], 32);
}
static void _bst_key_init(struct ice_parser_rt *rt, struct ice_imem_item *imem)
{
u8 tsr = (u8)rt->gpr[GPR_TSR_IDX];
u16 ho = rt->gpr[GPR_HO_IDX];
u8 *key = rt->bst_key;
int i;
if (imem->b_kb.tsr_ctrl)
key[19] = (u8)tsr;
else
key[19] = imem->b_kb.priority;
for (i = 18; i >= 0; i--) {
int j;
j = ho + 18 - i;
if (j < ICE_PARSER_MAX_PKT_LEN)
key[i] = rt->pkt_buf[ho + 18 - i];
else
key[i] = 0;
}
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n");
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
key[0], key[1], key[2], key[3], key[4],
key[5], key[6], key[7], key[8], key[9],
key[10], key[11], key[12], key[13], key[14],
key[15], key[16], key[17], key[18], key[19]);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n");
}
static u8 _bit_rev_u8(u8 v)
{
u8 r = 0;
int i;
for (i = 0; i < 8; i++) {
r |= (u8)((v & 0x1) << (7 - i));
v >>= 1;
}
return r;
}
static u16 _bit_rev_u16(u16 v, int len)
{
u16 r = 0;
int i;
for (i = 0; i < len; i++) {
r |= (u16)((v & 0x1) << (len - 1 - i));
v >>= 1;
}
return r;
}
static u32 _bit_rev_u32(u32 v, int len)
{
u32 r = 0;
int i;
for (i = 0; i < len; i++) {
r |= (u32)((v & 0x1) << (len - 1 - i));
v >>= 1;
}
return r;
}
static u32 _hv_bit_sel(struct ice_parser_rt *rt, int start, int len)
{
u64 d64, msk;
u8 b[8];
int i;
int offset = GPR_HB_IDX + start / 16;
memcpy(b, &rt->gpr[offset], 8);
for (i = 0; i < 8; i++)
b[i] = _bit_rev_u8(b[i]);
d64 = *(u64 *)b;
msk = (1ul << len) - 1;
return _bit_rev_u32((u32)((d64 >> (start % 16)) & msk), len);
}
static u32 _pk_build(struct ice_parser_rt *rt, struct ice_np_keybuilder *kb)
{
if (kb->ops == 0)
return _hv_bit_sel(rt, kb->start_or_reg0, kb->len_or_reg1);
else if (kb->ops == 1)
return rt->gpr[kb->start_or_reg0] |
((u32)rt->gpr[kb->len_or_reg1] << 16);
else if (kb->ops == 2)
return 0;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported ops %d\n", kb->ops);
return 0xffffffff;
}
static bool _flag_get(struct ice_parser_rt *rt, int index)
{
int y = index / 16;
int x = index % 16;
return (rt->gpr[GPR_FLG_IDX + y] & (u16)(1 << x)) != 0;
}
static void _imem_pgk_init(struct ice_parser_rt *rt, struct ice_imem_item *imem)
{
memset(&rt->pg_key, 0, sizeof(rt->pg_key));
rt->pg_key.next_proto = _pk_build(rt, &imem->np_kb);
if (imem->pg_kb.flag0_ena)
rt->pg_key.flag0 = _flag_get(rt, imem->pg_kb.flag0_idx);
if (imem->pg_kb.flag1_ena)
rt->pg_key.flag1 = _flag_get(rt, imem->pg_kb.flag1_idx);
if (imem->pg_kb.flag2_ena)
rt->pg_key.flag2 = _flag_get(rt, imem->pg_kb.flag2_idx);
if (imem->pg_kb.flag3_ena)
rt->pg_key.flag3 = _flag_get(rt, imem->pg_kb.flag3_idx);
rt->pg_key.alu_reg = rt->gpr[imem->pg_kb.alu_reg_idx];
rt->pg_key.node_id = rt->gpr[GPR_NN_IDX];
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d),flag0(%d), flag1(%d), flag2(%d), flag3(%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n",
rt->pg_key.node_id,
rt->pg_key.flag0,
rt->pg_key.flag1,
rt->pg_key.flag2,
rt->pg_key.flag3,
rt->pg_key.boost_idx,
rt->pg_key.alu_reg,
rt->pg_key.next_proto);
}
static void _imem_alu0_set(struct ice_parser_rt *rt, struct ice_imem_item *imem)
{
rt->alu0 = &imem->alu0;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from imem pc %d\n",
imem->idx);
}
static void _imem_alu1_set(struct ice_parser_rt *rt, struct ice_imem_item *imem)
{
rt->alu1 = &imem->alu1;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from imem pc %d\n",
imem->idx);
}
static void _imem_alu2_set(struct ice_parser_rt *rt, struct ice_imem_item *imem)
{
rt->alu2 = &imem->alu2;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from imem pc %d\n",
imem->idx);
}
static void _imem_pgp_set(struct ice_parser_rt *rt, struct ice_imem_item *imem)
{
rt->pg = imem->pg;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from imem pc %d\n",
rt->pg, imem->idx);
}
static void
_bst_pgk_init(struct ice_parser_rt *rt, struct ice_bst_tcam_item *bst)
{
memset(&rt->pg_key, 0, sizeof(rt->pg_key));
rt->pg_key.boost_idx = bst->hit_idx_grp;
rt->pg_key.next_proto = _pk_build(rt, &bst->np_kb);
if (bst->pg_kb.flag0_ena)
rt->pg_key.flag0 = _flag_get(rt, bst->pg_kb.flag0_idx);
if (bst->pg_kb.flag1_ena)
rt->pg_key.flag1 = _flag_get(rt, bst->pg_kb.flag1_idx);
if (bst->pg_kb.flag2_ena)
rt->pg_key.flag2 = _flag_get(rt, bst->pg_kb.flag2_idx);
if (bst->pg_kb.flag3_ena)
rt->pg_key.flag3 = _flag_get(rt, bst->pg_kb.flag3_idx);
rt->pg_key.alu_reg = rt->gpr[bst->pg_kb.alu_reg_idx];
rt->pg_key.node_id = rt->gpr[GPR_NN_IDX];
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d),flag0(%d), flag1(%d), flag2(%d), flag3(%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n",
rt->pg_key.node_id,
rt->pg_key.flag0,
rt->pg_key.flag1,
rt->pg_key.flag2,
rt->pg_key.flag3,
rt->pg_key.boost_idx,
rt->pg_key.alu_reg,
rt->pg_key.next_proto);
}
static void _bst_alu0_set(struct ice_parser_rt *rt,
struct ice_bst_tcam_item *bst)
{
rt->alu0 = &bst->alu0;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from boost address %d\n",
bst->address);
}
static void _bst_alu1_set(struct ice_parser_rt *rt,
struct ice_bst_tcam_item *bst)
{
rt->alu1 = &bst->alu1;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from boost address %d\n",
bst->address);
}
static void _bst_alu2_set(struct ice_parser_rt *rt,
struct ice_bst_tcam_item *bst)
{
rt->alu2 = &bst->alu2;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from boost address %d\n",
bst->address);
}
static void _bst_pgp_set(struct ice_parser_rt *rt,
struct ice_bst_tcam_item *bst)
{
rt->pg = bst->pg_pri;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from boost address %d\n",
rt->pg, bst->address);
}
static struct ice_pg_cam_item *__pg_cam_match(struct ice_parser_rt *rt)
{
struct ice_parser *psr = rt->psr;
struct ice_pg_cam_item *item;
item = ice_pg_cam_match(psr->pg_cam_table, ICE_PG_CAM_TABLE_SIZE,
&rt->pg_key);
if (item)
return item;
item = ice_pg_cam_match(psr->pg_sp_cam_table, ICE_PG_SP_CAM_TABLE_SIZE,
&rt->pg_key);
return item;
}
static struct ice_pg_nm_cam_item *__pg_nm_cam_match(struct ice_parser_rt *rt)
{
struct ice_parser *psr = rt->psr;
struct ice_pg_nm_cam_item *item;
item = ice_pg_nm_cam_match(psr->pg_nm_cam_table,
ICE_PG_NM_CAM_TABLE_SIZE, &rt->pg_key);
if (item)
return item;
item = ice_pg_nm_cam_match(psr->pg_nm_sp_cam_table,
ICE_PG_NM_SP_CAM_TABLE_SIZE,
&rt->pg_key);
return item;
}
static void _gpr_add(struct ice_parser_rt *rt, int idx, u16 val)
{
rt->pu.gpr_val_upd[idx] = true;
rt->pu.gpr_val[idx] = val;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for register %d value %d\n",
idx, val);
}
static void _pg_exe(struct ice_parser_rt *rt)
{
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action ...\n");
_gpr_add(rt, GPR_NP_IDX, rt->action->next_pc);
_gpr_add(rt, GPR_NN_IDX, rt->action->next_node);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action done.\n");
}
static void _flg_add(struct ice_parser_rt *rt, int idx, bool val)
{
rt->pu.flg_msk |= (1ul << idx);
if (val)
rt->pu.flg_val |= (1ul << idx);
else
rt->pu.flg_val &= ~(1ul << idx);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for flag %d value %d\n",
idx, val);
}
static void _flg_update(struct ice_parser_rt *rt, struct ice_alu *alu)
{
if (alu->dedicate_flags_ena) {
int i;
if (alu->flags_extr_imm) {
for (i = 0; i < alu->dst_len; i++)
_flg_add(rt, alu->dst_start + i,
(alu->flags_start_imm &
(1u << i)) != 0);
} else {
for (i = 0; i < alu->dst_len; i++) {
_flg_add(rt, alu->dst_start + i,
_hv_bit_sel(rt,
alu->flags_start_imm + i,
1) != 0);
}
}
}
}
static void _po_update(struct ice_parser_rt *rt, struct ice_alu *alu)
{
if (alu->proto_offset_opc == 1)
rt->po = (u16)(rt->gpr[GPR_HO_IDX] + alu->proto_offset);
else if (alu->proto_offset_opc == 2)
rt->po = (u16)(rt->gpr[GPR_HO_IDX] - alu->proto_offset);
else if (alu->proto_offset_opc == 0)
rt->po = rt->gpr[GPR_HO_IDX];
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Update Protocol Offset = %d\n",
rt->po);
}
static u16 _reg_bit_sel(struct ice_parser_rt *rt, int reg_idx,
int start, int len)
{
u32 d32, msk;
u8 b[4];
u8 v[4];
memcpy(b, &rt->gpr[reg_idx + start / 16], 4);
v[0] = _bit_rev_u8(b[0]);
v[1] = _bit_rev_u8(b[1]);
v[2] = _bit_rev_u8(b[2]);
v[3] = _bit_rev_u8(b[3]);
d32 = *(u32 *)v;
msk = (1u << len) - 1;
return _bit_rev_u16((u16)((d32 >> (start % 16)) & msk), len);
}
static void _err_add(struct ice_parser_rt *rt, int idx, bool val)
{
rt->pu.err_msk |= (u16)(1 << idx);
if (val)
	rt->pu.err_val |= (u16)(1 << idx);
else
	rt->pu.err_val &= ~(u16)(1 << idx);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for error %d value %d\n",
idx, val);
}
static void _dst_reg_bit_set(struct ice_parser_rt *rt, struct ice_alu *alu,
bool val)
{
u16 flg_idx;
if (alu->dedicate_flags_ena) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "DedicatedFlagsEnable should not be enabled in opcode %d\n",
alu->opc);
return;
}
if (alu->dst_reg_id == GPR_ERR_IDX) {
if (alu->dst_start >= 16) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid error %d\n",
alu->dst_start);
return;
}
_err_add(rt, alu->dst_start, val);
} else if (alu->dst_reg_id >= GPR_FLG_IDX) {
flg_idx = (u16)(((alu->dst_reg_id - GPR_FLG_IDX) << 4) +
alu->dst_start);
if (flg_idx >= 64) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid flag %d\n",
flg_idx);
return;
}
_flg_add(rt, flg_idx, val);
} else {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unexpected Dest Register Bit set, RegisterID %d Start %d\n",
alu->dst_reg_id, alu->dst_start);
}
}
static void _alu_exe(struct ice_parser_rt *rt, struct ice_alu *alu)
{
u16 dst, src, shift, imm;
if (alu->shift_xlate_select) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "shift_xlate_select != 0 is not expected\n");
return;
}
_po_update(rt, alu);
_flg_update(rt, alu);
dst = rt->gpr[alu->dst_reg_id];
src = _reg_bit_sel(rt, alu->src_reg_id, alu->src_start, alu->src_len);
shift = alu->shift_xlate_key;
imm = alu->imm;
switch (alu->opc) {
case ICE_ALU_PARK:
break;
case ICE_ALU_MOV_ADD:
dst = (u16)((src << shift) + imm);
_gpr_add(rt, alu->dst_reg_id, dst);
break;
case ICE_ALU_ADD:
dst += (u16)((src << shift) + imm);
_gpr_add(rt, alu->dst_reg_id, dst);
break;
case ICE_ALU_ORLT:
if (src < imm)
_dst_reg_bit_set(rt, alu, true);
_gpr_add(rt, GPR_NP_IDX, alu->branch_addr);
break;
case ICE_ALU_OREQ:
if (src == imm)
_dst_reg_bit_set(rt, alu, true);
_gpr_add(rt, GPR_NP_IDX, alu->branch_addr);
break;
case ICE_ALU_SETEQ:
if (src == imm)
_dst_reg_bit_set(rt, alu, true);
else
_dst_reg_bit_set(rt, alu, false);
_gpr_add(rt, GPR_NP_IDX, alu->branch_addr);
break;
case ICE_ALU_MOV_XOR:
dst = (u16)((u16)(src << shift) ^ (u16)imm);
_gpr_add(rt, alu->dst_reg_id, dst);
break;
default:
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported ALU instruction %d\n",
alu->opc);
break;
}
}
static void _alu0_exe(struct ice_parser_rt *rt)
{
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 ...\n");
_alu_exe(rt, rt->alu0);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 done.\n");
}
static void _alu1_exe(struct ice_parser_rt *rt)
{
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 ...\n");
_alu_exe(rt, rt->alu1);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 done.\n");
}
static void _alu2_exe(struct ice_parser_rt *rt)
{
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 ...\n");
_alu_exe(rt, rt->alu2);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 done.\n");
}
static void _pu_exe(struct ice_parser_rt *rt)
{
struct ice_gpr_pu *pu = &rt->pu;
int i;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers ...\n");
for (i = 0; i < 128; i++) {
if (pu->gpr_val_upd[i])
_rt_gpr_set(rt, i, pu->gpr_val[i]);
}
for (i = 0; i < 64; i++) {
if (pu->flg_msk & (1ul << i))
_rt_flag_set(rt, i, pu->flg_val & (1ul << i));
}
for (i = 0; i < 16; i++) {
if (pu->err_msk & (1u << i))
_rt_err_set(rt, i, pu->err_val & (1u << i));
}
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers done.\n");
}
static void _alu_pg_exe(struct ice_parser_rt *rt)
{
memset(&rt->pu, 0, sizeof(rt->pu));
if (rt->pg == 0) {
_pg_exe(rt);
_alu0_exe(rt);
_alu1_exe(rt);
_alu2_exe(rt);
} else if (rt->pg == 1) {
_alu0_exe(rt);
_pg_exe(rt);
_alu1_exe(rt);
_alu2_exe(rt);
} else if (rt->pg == 2) {
_alu0_exe(rt);
_alu1_exe(rt);
_pg_exe(rt);
_alu2_exe(rt);
} else if (rt->pg == 3) {
_alu0_exe(rt);
_alu1_exe(rt);
_alu2_exe(rt);
_pg_exe(rt);
}
_pu_exe(rt);
if (rt->action->ho_inc == 0)
return;
if (rt->action->ho_polarity)
_rt_ho_set(rt, rt->gpr[GPR_HO_IDX] + rt->action->ho_inc);
else
_rt_ho_set(rt, rt->gpr[GPR_HO_IDX] - rt->action->ho_inc);
}
static void _proto_off_update(struct ice_parser_rt *rt)
{
struct ice_parser *psr = rt->psr;
if (rt->action->is_pg) {
struct ice_proto_grp_item *proto_grp =
&psr->proto_grp_table[rt->action->proto_id];
u16 po;
int i;
for (i = 0; i < 8; i++) {
struct ice_proto_off *entry = &proto_grp->po[i];
if (entry->proto_id == 0xff)
break;
if (!entry->polarity)
po = (u16)(rt->po + entry->offset);
else
po = (u16)(rt->po - entry->offset);
rt->protocols[entry->proto_id] = true;
rt->offsets[entry->proto_id] = po;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
entry->proto_id, po);
}
} else {
rt->protocols[rt->action->proto_id] = true;
rt->offsets[rt->action->proto_id] = rt->po;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
rt->action->proto_id, rt->po);
}
}
static void _marker_set(struct ice_parser_rt *rt, int idx)
{
int x = idx / 8;
int y = idx % 8;
rt->markers[x] |= (u8)(1u << y);
}
static void _marker_update(struct ice_parser_rt *rt)
{
struct ice_parser *psr = rt->psr;
if (rt->action->is_mg) {
struct ice_mk_grp_item *mk_grp =
&psr->mk_grp_table[rt->action->marker_id];
int i;
for (i = 0; i < 8; i++) {
u8 marker = mk_grp->markers[i];
if (marker == 71)
break;
_marker_set(rt, marker);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
marker);
}
} else {
if (rt->action->marker_id != 71)
_marker_set(rt, rt->action->marker_id);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
rt->action->marker_id);
}
}
static u16 _ptype_resolve(struct ice_parser_rt *rt)
{
struct ice_parser *psr = rt->psr;
struct ice_ptype_mk_tcam_item *item;
item = ice_ptype_mk_tcam_match(psr->ptype_mk_tcam_table,
rt->markers, 9);
if (item)
return item->ptype;
return 0xffff;
}
static void _proto_off_resolve(struct ice_parser_rt *rt,
struct ice_parser_result *rslt)
{
int i;
for (i = 0; i < 255; i++) {
if (rt->protocols[i]) {
rslt->po[rslt->po_num].proto_id = (u8)i;
rslt->po[rslt->po_num].offset = rt->offsets[i];
rslt->po_num++;
}
}
}
static void _result_resolve(struct ice_parser_rt *rt,
struct ice_parser_result *rslt)
{
struct ice_parser *psr = rt->psr;
memset(rslt, 0, sizeof(*rslt));
rslt->ptype = _ptype_resolve(rt);
memcpy(&rslt->flags_psr, &rt->gpr[GPR_FLG_IDX], 8);
rslt->flags_pkt = ice_flg_redirect(psr->flg_rd_table, rslt->flags_psr);
rslt->flags_sw = ice_xlt_kb_flag_get(psr->xlt_kb_sw, rslt->flags_pkt);
rslt->flags_fd = ice_xlt_kb_flag_get(psr->xlt_kb_fd, rslt->flags_pkt);
rslt->flags_rss = ice_xlt_kb_flag_get(psr->xlt_kb_rss, rslt->flags_pkt);
_proto_off_resolve(rt, rslt);
}
/**
* ice_parser_rt_execute - parser execution routine
* @rt: pointer to the parser runtime
* @rslt: input/output parameter to save parser result
*/
int ice_parser_rt_execute(struct ice_parser_rt *rt,
			  struct ice_parser_result *rslt)
{
	struct ice_pg_nm_cam_item *pg_nm_cam;
	struct ice_parser *psr = rt->psr;
	struct ice_pg_cam_item *pg_cam;
	int status = 0;
	u16 node;
	u16 pc;

	node = rt->gpr[GPR_NN_IDX];
	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Start with Node: %d\n", node);

	while (true) {
		struct ice_bst_tcam_item *bst;
		struct ice_imem_item *imem;

		pc = rt->gpr[GPR_NP_IDX];
		imem = &psr->imem_table[pc];
		ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load imem at pc: %d\n",
			  pc);

		_bst_key_init(rt, imem);
		bst = ice_bst_tcam_match(psr->bst_tcam_table, rt->bst_key);

		if (!bst) {
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "No Boost TCAM Match\n");
			_imem_pgk_init(rt, imem);
			_imem_alu0_set(rt, imem);
			_imem_alu1_set(rt, imem);
			_imem_alu2_set(rt, imem);
			_imem_pgp_set(rt, imem);
		} else {
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Boost TCAM Match address: %d\n",
				  bst->address);
			if (imem->b_m.pg) {
				_bst_pgk_init(rt, bst);
				_bst_pgp_set(rt, bst);
			} else {
				_imem_pgk_init(rt, imem);
				_imem_pgp_set(rt, imem);
			}

			if (imem->b_m.al0)
				_bst_alu0_set(rt, bst);
			else
				_imem_alu0_set(rt, imem);

			if (imem->b_m.al1)
				_bst_alu1_set(rt, bst);
			else
				_imem_alu1_set(rt, imem);

			if (imem->b_m.al2)
				_bst_alu2_set(rt, bst);
			else
				_imem_alu2_set(rt, imem);
		}

		rt->action = NULL;
		pg_cam = __pg_cam_match(rt);
		if (!pg_cam) {
			pg_nm_cam = __pg_nm_cam_match(rt);
			if (pg_nm_cam) {
				ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph Nomatch CAM Address %d\n",
					  pg_nm_cam->idx);
				rt->action = &pg_nm_cam->action;
			}
		} else {
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph CAM Address %d\n",
				  pg_cam->idx);
			rt->action = &pg_cam->action;
		}

		if (!rt->action) {
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Failed to match ParseGraph CAM, stop parsing.\n");
			status = -EINVAL;
			break;
		}

		_alu_pg_exe(rt);
		_marker_update(rt);
		_proto_off_update(rt);

		ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Go to node %d\n",
			  rt->action->next_node);

		if (rt->action->is_last_round) {
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Last Round in ParseGraph Action, stop parsing.\n");
			break;
		}

		if (rt->gpr[GPR_HO_IDX] >= rt->pkt_len) {
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Header Offset %d is larger than packet len %d, stop parsing\n",
				  rt->gpr[GPR_HO_IDX], rt->pkt_len);
			break;
		}
	}

	_result_resolve(rt, rslt);

	return status;
}

drivers/thirdparty/ice/ice_parser_rt.h vendored Normal file

@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_PARSER_RT_H_
#define _ICE_PARSER_RT_H_
struct ice_parser_ctx;
#define ICE_PARSER_MAX_PKT_LEN 504
#define ICE_PARSER_GPR_NUM 128
struct ice_gpr_pu {
	/* flags to indicate if each GPR needs to be updated */
	bool gpr_val_upd[ICE_PARSER_GPR_NUM];
	u16 gpr_val[ICE_PARSER_GPR_NUM];
	u64 flg_msk;
	u64 flg_val;
	u16 err_msk;
	u16 err_val;
};

struct ice_parser_rt {
	struct ice_parser *psr;
	u16 gpr[ICE_PARSER_GPR_NUM];
	u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + 32];
	u16 pkt_len;
	u16 po;
	u8 bst_key[20];
	struct ice_pg_cam_key pg_key;
	struct ice_alu *alu0;
	struct ice_alu *alu1;
	struct ice_alu *alu2;
	struct ice_pg_cam_action *action;
	u8 pg;
	struct ice_gpr_pu pu;
	u8 markers[9]; /* 9 markers * 8 bits = 72 bits */
	bool protocols[256];
	u16 offsets[256];
};
void ice_parser_rt_reset(struct ice_parser_rt *rt);
void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf,
int pkt_len);
struct ice_parser_result;
int ice_parser_rt_execute(struct ice_parser_rt *rt,
struct ice_parser_result *rslt);
#endif /* _ICE_PARSER_RT_H_ */
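For orientation, here is a minimal usage sketch (not part of the driver) showing how the three entry points above compose. It assumes psr is an initialized struct ice_parser with its runtime embedded at psr->rt, which is how the rt->psr back-pointer seen in ice_parser_rt.c is normally set up; a wrapper along the lines of ice_parser_run() would do exactly these steps.

/* Sketch only: run the parser runtime over one frame. */
static int example_parse_frame(struct ice_parser *psr, const u8 *frame,
			       int len, struct ice_parser_result *rslt)
{
	/* clear GPRs, markers and per-protocol offset tracking */
	ice_parser_rt_reset(&psr->rt);
	/* copy up to ICE_PARSER_MAX_PKT_LEN bytes into the runtime */
	ice_parser_rt_pktbuf_set(&psr->rt, frame, len);
	/* walk the parse graph; on success rslt carries the resolved
	 * ptype, flags and protocol/offset pairs
	 */
	return ice_parser_rt_execute(&psr->rt, rslt);
}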

drivers/thirdparty/ice/ice_parser_util.h vendored Normal file

@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_PARSER_UTIL_H_
#define _ICE_PARSER_UTIL_H_
#include "ice_imem.h"
#include "ice_metainit.h"
struct ice_lbl_item {
u16 idx;
char label[64];
};
struct ice_pkg_sect_hdr {
__le16 count;
__le16 offset;
};
void ice_lbl_dump(struct ice_hw *hw, struct ice_lbl_item *item);
void ice_parse_item_dflt(struct ice_hw *hw, u16 idx, void *item,
void *data, int size);
void *ice_parser_sect_item_get(u32 sect_type, void *section,
u32 index, u32 *offset);
void *ice_parser_create_table(struct ice_hw *hw, u32 sect_type,
u32 item_size, u32 length,
void *(*handler)(u32 sect_type, void *section,
u32 index, u32 *offset),
void (*parse_item)(struct ice_hw *hw, u16 idx,
void *item, void *data,
int size),
bool no_offset);
#endif /* _ICE_PARSER_UTIL_H_ */

drivers/thirdparty/ice/ice_pg_cam.c vendored Normal file

@ -0,0 +1,376 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice_common.h"
#include "ice_parser_util.h"
static void _pg_cam_key_dump(struct ice_hw *hw, struct ice_pg_cam_key *key)
{
dev_info(ice_hw_to_dev(hw), "key:\n");
dev_info(ice_hw_to_dev(hw), "\tvalid = %d\n", key->valid);
dev_info(ice_hw_to_dev(hw), "\tnode_id = %d\n", key->node_id);
dev_info(ice_hw_to_dev(hw), "\tflag0 = %d\n", key->flag0);
dev_info(ice_hw_to_dev(hw), "\tflag1 = %d\n", key->flag1);
dev_info(ice_hw_to_dev(hw), "\tflag2 = %d\n", key->flag2);
dev_info(ice_hw_to_dev(hw), "\tflag3 = %d\n", key->flag3);
dev_info(ice_hw_to_dev(hw), "\tboost_idx = %d\n", key->boost_idx);
dev_info(ice_hw_to_dev(hw), "\talu_reg = 0x%04x\n", key->alu_reg);
dev_info(ice_hw_to_dev(hw), "\tnext_proto = 0x%08x\n",
key->next_proto);
}
static void _pg_nm_cam_key_dump(struct ice_hw *hw,
struct ice_pg_nm_cam_key *key)
{
dev_info(ice_hw_to_dev(hw), "key:\n");
dev_info(ice_hw_to_dev(hw), "\tvalid = %d\n", key->valid);
dev_info(ice_hw_to_dev(hw), "\tnode_id = %d\n", key->node_id);
dev_info(ice_hw_to_dev(hw), "\tflag0 = %d\n", key->flag0);
dev_info(ice_hw_to_dev(hw), "\tflag1 = %d\n", key->flag1);
dev_info(ice_hw_to_dev(hw), "\tflag2 = %d\n", key->flag2);
dev_info(ice_hw_to_dev(hw), "\tflag3 = %d\n", key->flag3);
dev_info(ice_hw_to_dev(hw), "\tboost_idx = %d\n", key->boost_idx);
dev_info(ice_hw_to_dev(hw), "\talu_reg = 0x%04x\n", key->alu_reg);
}
static void _pg_cam_action_dump(struct ice_hw *hw,
struct ice_pg_cam_action *action)
{
dev_info(ice_hw_to_dev(hw), "action:\n");
dev_info(ice_hw_to_dev(hw), "\tnext_node = %d\n", action->next_node);
dev_info(ice_hw_to_dev(hw), "\tnext_pc = %d\n", action->next_pc);
dev_info(ice_hw_to_dev(hw), "\tis_pg = %d\n", action->is_pg);
dev_info(ice_hw_to_dev(hw), "\tproto_id = %d\n", action->proto_id);
dev_info(ice_hw_to_dev(hw), "\tis_mg = %d\n", action->is_mg);
dev_info(ice_hw_to_dev(hw), "\tmarker_id = %d\n", action->marker_id);
dev_info(ice_hw_to_dev(hw), "\tis_last_round = %d\n",
action->is_last_round);
dev_info(ice_hw_to_dev(hw), "\tho_polarity = %d\n",
action->ho_polarity);
dev_info(ice_hw_to_dev(hw), "\tho_inc = %d\n", action->ho_inc);
}
/**
* ice_pg_cam_dump - dump a parse graph CAM item's info
* @hw: pointer to the hardware structure
* @item: parse graph cam to dump
*/
void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item)
{
dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
_pg_cam_key_dump(hw, &item->key);
_pg_cam_action_dump(hw, &item->action);
}
/**
* ice_pg_nm_cam_dump - dump a parse graph no-match CAM item's info
* @hw: pointer to the hardware structure
* @item: parse graph no match cam to dump
*/
void ice_pg_nm_cam_dump(struct ice_hw *hw, struct ice_pg_nm_cam_item *item)
{
dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
_pg_nm_cam_key_dump(hw, &item->key);
_pg_cam_action_dump(hw, &item->action);
}
/** The function parses a 55-bit Parse Graph CAM Action in the below format
 * (bit ranges follow the shifts used in _pg_cam_action_init()):
 * BIT 0-10: Next Node ID (action->next_node)
 * BIT 11-18: Next PC (action->next_pc)
 * BIT 19: Is Protocol Group (action->is_pg)
 * BIT 20-22: reserved
 * BIT 23-30: Protocol ID (action->proto_id)
 * BIT 31: Is Marker Group (action->is_mg)
 * BIT 32-39: Marker ID (action->marker_id)
 * BIT 40: Is Last Round (action->is_last_round)
 * BIT 41: Header Offset Polarity (action->ho_polarity)
 * BIT 42-50: Header Offset Inc (action->ho_inc)
 * BIT 51-54: reserved
*/
static void _pg_cam_action_init(struct ice_pg_cam_action *action, u64 data)
{
action->next_node = (u16)(data & 0x7ff);
action->next_pc = (u8)((data >> 11) & 0xff);
action->is_pg = ((data >> 19) & 0x1) != 0;
action->proto_id = ((data >> 23) & 0xff);
action->is_mg = ((data >> 31) & 0x1) != 0;
action->marker_id = ((data >> 32) & 0xff);
action->is_last_round = ((data >> 40) & 0x1) != 0;
action->ho_polarity = ((data >> 41) & 0x1) != 0;
action->ho_inc = ((data >> 42) & 0x1ff);
}
/** The function parses a 41-bit Parse Graph NoMatch CAM Key in the below format:
* BIT 0: Valid (key->valid)
* BIT 1-11: Node ID (key->node_id)
* BIT 12: Flag 0 (key->flag0)
* BIT 13: Flag 1 (key->flag1)
* BIT 14: Flag 2 (key->flag2)
* BIT 15: Flag 3 (key->flag3)
* BIT 16: Boost Hit (key->boost_idx is set to 0 when this bit is 0)
* BIT 17-24: Boost Index (key->boost_idx only if Boost Hit is not 0)
* BIT 25-40: ALU Reg (key->alu_reg)
*/
static void _pg_nm_cam_key_init(struct ice_pg_nm_cam_key *key, u64 data)
{
key->valid = (data & 0x1) != 0;
key->node_id = (u16)((data >> 1) & 0x7ff);
key->flag0 = ((data >> 12) & 0x1) != 0;
key->flag1 = ((data >> 13) & 0x1) != 0;
key->flag2 = ((data >> 14) & 0x1) != 0;
key->flag3 = ((data >> 15) & 0x1) != 0;
if ((data >> 16) & 0x1)
key->boost_idx = (u8)((data >> 17) & 0xff);
else
key->boost_idx = 0;
key->alu_reg = (u16)((data >> 25) & 0xffff);
}
/** The function parses a 73-bit Parse Graph CAM Key in the below format:
* BIT 0: Valid (key->valid)
* BIT 1-11: Node ID (key->node_id)
* BIT 12: Flag 0 (key->flag0)
* BIT 13: Flag 1 (key->flag1)
* BIT 14: Flag 2 (key->flag2)
* BIT 15: Flag 3 (key->flag3)
* BIT 16: Boost Hit (key->boost_idx is set to 0 when this bit is 0)
* BIT 17-24: Boost Index (key->boost_idx only if Boost Hit is not 0)
* BIT 25-40: ALU Reg (key->alu_reg)
* BIT 41-72: Next Proto Key (key->next_proto)
*/
static void _pg_cam_key_init(struct ice_pg_cam_key *key, u8 *data)
{
u64 d64 = *(u64 *)data;
key->valid = (d64 & 0x1) != 0;
key->node_id = (u16)((d64 >> 1) & 0x7ff);
key->flag0 = ((d64 >> 12) & 0x1) != 0;
key->flag1 = ((d64 >> 13) & 0x1) != 0;
key->flag2 = ((d64 >> 14) & 0x1) != 0;
key->flag3 = ((d64 >> 15) & 0x1) != 0;
if ((d64 >> 16) & 0x1)
key->boost_idx = (u8)((d64 >> 17) & 0xff);
else
key->boost_idx = 0;
key->alu_reg = (u16)((d64 >> 25) & 0xffff);
key->next_proto = (*(u32 *)&data[5] >> 1);
key->next_proto |= ((u32)(data[9] & 0x1) << 31);
}
/** The function parses a 128-bit Parse Graph CAM Entry in the below format:
* BIT 0-72: Key (ci->key)
* BIT 73-127: Action (ci->action)
*/
static void _pg_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
void *data, int size)
{
struct ice_pg_cam_item *ci = (struct ice_pg_cam_item *)item;
u8 *buf = (u8 *)data;
u64 d64;
ci->idx = idx;
d64 = (*(u64 *)&buf[9] >> 1);
_pg_cam_key_init(&ci->key, buf);
_pg_cam_action_init(&ci->action, d64);
if (hw->debug_mask & ICE_DBG_PARSER)
ice_pg_cam_dump(hw, ci);
}
/** The function parses a 136-bit Parse Graph Spill CAM Entry in the below
 * format:
 * BIT 0-55: Action (ci->action)
 * BIT 56-135: Key (ci->key)
*/
static void _pg_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
void *data, int size)
{
struct ice_pg_cam_item *ci = (struct ice_pg_cam_item *)item;
u8 *buf = (u8 *)data;
u64 d64;
ci->idx = idx;
d64 = *(u64 *)buf;
_pg_cam_action_init(&ci->action, d64);
_pg_cam_key_init(&ci->key, &buf[7]);
if (hw->debug_mask & ICE_DBG_PARSER)
ice_pg_cam_dump(hw, ci);
}
/** The function parses a 96-bit Parse Graph NoMatch CAM Entry in the below
 * format:
* BIT 0-40: Key (ci->key)
* BIT 41-95: Action (ci->action)
*/
static void _pg_nm_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
void *data, int size)
{
struct ice_pg_nm_cam_item *ci = (struct ice_pg_nm_cam_item *)item;
u8 *buf = (u8 *)data;
u64 d64;
ci->idx = idx;
d64 = *(u64 *)buf;
_pg_nm_cam_key_init(&ci->key, d64);
d64 = (*(u64 *)&buf[5] >> 1);
_pg_cam_action_init(&ci->action, d64);
if (hw->debug_mask & ICE_DBG_PARSER)
ice_pg_nm_cam_dump(hw, ci);
}
/** The function parses a 104-bit Parse Graph NoMatch Spill CAM Entry in
 * the below format:
 * BIT 0-55: Action (ci->action)
 * BIT 56-103: Key (ci->key)
*/
static void _pg_nm_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
void *data, int size)
{
struct ice_pg_nm_cam_item *ci = (struct ice_pg_nm_cam_item *)item;
u8 *buf = (u8 *)data;
u64 d64;
ci->idx = idx;
d64 = *(u64 *)buf;
_pg_cam_action_init(&ci->action, d64);
d64 = *(u64 *)&buf[7];
_pg_nm_cam_key_init(&ci->key, d64);
if (hw->debug_mask & ICE_DBG_PARSER)
ice_pg_nm_cam_dump(hw, ci);
}
/**
* ice_pg_cam_table_get - create a parse graph cam table
* @hw: pointer to the hardware structure
*/
struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw)
{
return (struct ice_pg_cam_item *)
ice_parser_create_table(hw, ICE_SID_RXPARSER_CAM,
sizeof(struct ice_pg_cam_item),
ICE_PG_CAM_TABLE_SIZE,
ice_parser_sect_item_get,
_pg_cam_parse_item, false);
}
/**
* ice_pg_sp_cam_table_get - create a parse graph spill cam table
* @hw: pointer to the hardware structure
*/
struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw)
{
return (struct ice_pg_cam_item *)
ice_parser_create_table(hw, ICE_SID_RXPARSER_PG_SPILL,
sizeof(struct ice_pg_cam_item),
ICE_PG_SP_CAM_TABLE_SIZE,
ice_parser_sect_item_get,
_pg_sp_cam_parse_item, false);
}
/**
* ice_pg_nm_cam_table_get - create a parse graph no match cam table
* @hw: pointer to the hardware structure
*/
struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw)
{
return (struct ice_pg_nm_cam_item *)
ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_CAM,
sizeof(struct ice_pg_nm_cam_item),
ICE_PG_NM_CAM_TABLE_SIZE,
ice_parser_sect_item_get,
_pg_nm_cam_parse_item, false);
}
/**
* ice_pg_nm_sp_cam_table_get - create a parse graph no match spill cam table
* @hw: pointer to the hardware structure
*/
struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw)
{
return (struct ice_pg_nm_cam_item *)
ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_SPILL,
sizeof(struct ice_pg_nm_cam_item),
ICE_PG_NM_SP_CAM_TABLE_SIZE,
ice_parser_sect_item_get,
_pg_nm_sp_cam_parse_item, false);
}
static bool _pg_cam_match(struct ice_pg_cam_item *item,
struct ice_pg_cam_key *key)
{
if (!item->key.valid ||
item->key.node_id != key->node_id ||
item->key.flag0 != key->flag0 ||
item->key.flag1 != key->flag1 ||
item->key.flag2 != key->flag2 ||
item->key.flag3 != key->flag3 ||
item->key.boost_idx != key->boost_idx ||
item->key.alu_reg != key->alu_reg ||
item->key.next_proto != key->next_proto)
return false;
return true;
}
static bool _pg_nm_cam_match(struct ice_pg_nm_cam_item *item,
struct ice_pg_cam_key *key)
{
if (!item->key.valid ||
item->key.node_id != key->node_id ||
item->key.flag0 != key->flag0 ||
item->key.flag1 != key->flag1 ||
item->key.flag2 != key->flag2 ||
item->key.flag3 != key->flag3 ||
item->key.boost_idx != key->boost_idx ||
item->key.alu_reg != key->alu_reg)
return false;
return true;
}
/**
* ice_pg_cam_match - search parse graph cam table by key
* @table: parse graph cam table to search
* @size: cam table size
* @key: search key
*/
struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
int size, struct ice_pg_cam_key *key)
{
int i;
for (i = 0; i < size; i++) {
struct ice_pg_cam_item *item = &table[i];
if (_pg_cam_match(item, key))
return item;
}
return NULL;
}
/**
* ice_pg_nm_cam_match - search parse graph no match cam table by key
* @table: parse graph no match cam table to search
* @size: cam table size
* @key: search key
*/
struct ice_pg_nm_cam_item *
ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
struct ice_pg_cam_key *key)
{
int i;
for (i = 0; i < size; i++) {
struct ice_pg_nm_cam_item *item = &table[i];
if (_pg_nm_cam_match(item, key))
return item;
}
return NULL;
}
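To make the lookup order concrete, a small sketch (not in the driver) of how a caller combines the two match routines, mirroring the fallback done in ice_parser_rt_execute(); the table pointers are assumed to come from the *_table_get() constructors above, and the spill tables would be consulted the same way.

static struct ice_pg_cam_action *
example_pg_lookup(struct ice_pg_cam_item *cam,
		  struct ice_pg_nm_cam_item *nm_cam,
		  struct ice_pg_cam_key *key)
{
	struct ice_pg_nm_cam_item *nm_item;
	struct ice_pg_cam_item *item;

	/* primary parse graph CAM first */
	item = ice_pg_cam_match(cam, ICE_PG_CAM_TABLE_SIZE, key);
	if (item)
		return &item->action;

	/* fall back to the no-match CAM; next_proto is not part of
	 * the no-match key
	 */
	nm_item = ice_pg_nm_cam_match(nm_cam, ICE_PG_NM_CAM_TABLE_SIZE, key);
	if (nm_item)
		return &nm_item->action;

	return NULL; /* the runtime stops with -EINVAL in this case */
}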

drivers/thirdparty/ice/ice_pg_cam.h vendored Normal file

@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_PG_CAM_H_
#define _ICE_PG_CAM_H_
#define ICE_PG_CAM_TABLE_SIZE 2048
#define ICE_PG_SP_CAM_TABLE_SIZE 128
#define ICE_PG_NM_CAM_TABLE_SIZE 1024
#define ICE_PG_NM_SP_CAM_TABLE_SIZE 64
struct ice_pg_cam_key {
bool valid;
u16 node_id;
bool flag0;
bool flag1;
bool flag2;
bool flag3;
u8 boost_idx;
u16 alu_reg;
u32 next_proto;
};
struct ice_pg_nm_cam_key {
bool valid;
u16 node_id;
bool flag0;
bool flag1;
bool flag2;
bool flag3;
u8 boost_idx;
u16 alu_reg;
};
struct ice_pg_cam_action {
u16 next_node;
u8 next_pc;
bool is_pg;
u8 proto_id;
bool is_mg;
u8 marker_id;
bool is_last_round;
bool ho_polarity;
u16 ho_inc;
};
struct ice_pg_cam_item {
u16 idx;
struct ice_pg_cam_key key;
struct ice_pg_cam_action action;
};
struct ice_pg_nm_cam_item {
u16 idx;
struct ice_pg_nm_cam_key key;
struct ice_pg_cam_action action;
};
void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item);
void ice_pg_nm_cam_dump(struct ice_hw *hw, struct ice_pg_nm_cam_item *item);
struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw);
struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw);
struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw);
struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw);
struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
int size, struct ice_pg_cam_key *key);
struct ice_pg_nm_cam_item *
ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
struct ice_pg_cam_key *key);
#endif /* _ICE_PG_CAM_H_ */

drivers/thirdparty/ice/ice_proto_grp.c vendored Normal file

@ -0,0 +1,106 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */
#include "ice_common.h"
#include "ice_parser_util.h"
static void _proto_off_dump(struct ice_hw *hw, struct ice_proto_off *po,
int idx)
{
dev_info(ice_hw_to_dev(hw), "proto %d\n", idx);
dev_info(ice_hw_to_dev(hw), "\tpolarity = %d\n", po->polarity);
dev_info(ice_hw_to_dev(hw), "\tproto_id = %d\n", po->proto_id);
dev_info(ice_hw_to_dev(hw), "\toffset = %d\n", po->offset);
}
/**
* ice_proto_grp_dump - dump a proto group item info
* @hw: pointer to the hardware structure
* @item: proto group item to dump
*/
void ice_proto_grp_dump(struct ice_hw *hw, struct ice_proto_grp_item *item)
{
int i;
dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++)
_proto_off_dump(hw, &item->po[i], i);
}
/** The function parses a 22-bit Protocol entry in the below format:
* BIT 0: Polarity of Protocol Offset (po->polarity)
* BIT 1-8: Protocol ID (po->proto_id)
* BIT 9-11: reserved
* BIT 12-21: Protocol Offset (po->offset)
*/
static void _proto_off_parse(struct ice_proto_off *po, u32 data)
{
po->polarity = (data & 0x1) != 0;
po->proto_id = (u8)((data >> 1) & 0xff);
po->offset = (u16)((data >> 12) & 0x3ff);
}
/** The function parses a 192-bit Protocol Group Table entry in the below
 * format:
 * BIT 0-21:    Protocol 0 (grp->po[0])
 * BIT 22-43:   Protocol 1 (grp->po[1])
 * BIT 44-65:   Protocol 2 (grp->po[2])
 * BIT 66-87:   Protocol 3 (grp->po[3])
 * BIT 88-109:  Protocol 4 (grp->po[4])
 * BIT 110-131: Protocol 5 (grp->po[5])
 * BIT 132-153: Protocol 6 (grp->po[6])
 * BIT 154-175: Protocol 7 (grp->po[7])
 * BIT 176-191: reserved
*/
static void _proto_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
void *data, int size)
{
struct ice_proto_grp_item *grp = (struct ice_proto_grp_item *)item;
u8 *buf = (u8 *)data;
u32 d32;
grp->idx = idx;
d32 = *(u32 *)buf;
_proto_off_parse(&grp->po[0], d32);
d32 = (*(u32 *)&buf[2] >> 6);
_proto_off_parse(&grp->po[1], d32);
d32 = (*(u32 *)&buf[5] >> 4);
_proto_off_parse(&grp->po[2], d32);
d32 = (*(u32 *)&buf[8] >> 2);
_proto_off_parse(&grp->po[3], d32);
d32 = *(u32 *)&buf[11];
_proto_off_parse(&grp->po[4], d32);
d32 = (*(u32 *)&buf[13] >> 6);
_proto_off_parse(&grp->po[5], d32);
d32 = (*(u32 *)&buf[16] >> 4);
_proto_off_parse(&grp->po[6], d32);
d32 = (*(u32 *)&buf[19] >> 2);
_proto_off_parse(&grp->po[7], d32);
if (hw->debug_mask & ICE_DBG_PARSER)
ice_proto_grp_dump(hw, grp);
}
/**
* ice_proto_grp_table_get - create a proto group table
* @hw: pointer to the hardware structure
*/
struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw)
{
return (struct ice_proto_grp_item *)
ice_parser_create_table(hw, ICE_SID_RXPARSER_PROTO_GRP,
sizeof(struct ice_proto_grp_item),
ICE_PROTO_GRP_TABLE_SIZE,
ice_parser_sect_item_get,
_proto_grp_parse_item, false);
}

drivers/thirdparty/ice/ice_proto_grp.h vendored Normal file

@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */
#ifndef _ICE_PROTO_GRP_H_
#define _ICE_PROTO_GRP_H_
#define ICE_PROTO_COUNT_PER_GRP 8
#define ICE_PROTO_GRP_TABLE_SIZE 192
struct ice_proto_off {
bool polarity; /* true: positive, false: negative */
u8 proto_id;
u16 offset;
};
struct ice_proto_grp_item {
u16 idx;
struct ice_proto_off po[ICE_PROTO_COUNT_PER_GRP];
};
void ice_proto_grp_dump(struct ice_hw *hw, struct ice_proto_grp_item *item);
struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw);
#endif /* _ICE_PROTO_GRP_H_ */
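As a small illustration of the table's semantics (not driver code), here is how one ice_proto_off entry would be applied to a running header offset under the polarity convention noted above; exactly how ice_parser_rt.c folds this into its per-protocol offsets may differ.

/* Sketch only: polarity selects whether the 10-bit offset is added to
 * or subtracted from the current header offset "ho".
 */
static inline u16 example_proto_off_apply(const struct ice_proto_off *po,
					  u16 ho)
{
	return po->polarity ? ho + po->offset : ho - po->offset;
}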

drivers/thirdparty/ice/ice_protocol_type.h vendored

@ -30,6 +30,7 @@ enum ice_protocol_type {
	ICE_MAC_OFOS = 0,
	ICE_MAC_IL,
	ICE_ETYPE_OL,
+	ICE_ETYPE_IL,
	ICE_VLAN_OFOS,
	ICE_IPV4_OFOS,
	ICE_IPV4_IL,
@ -44,15 +45,16 @@ enum ice_protocol_type {
	ICE_VXLAN_GPE,
	ICE_NVGRE,
	ICE_GTP,
+	ICE_GTP_NO_PAY,
	ICE_PPPOE,
	ICE_PFCP,
	ICE_L2TPV3,
	ICE_ESP,
	ICE_AH,
	ICE_NAT_T,
-	ICE_GTP_NO_PAY,
	ICE_VLAN_EX,
	ICE_VLAN_IN,
+	ICE_FLG_DIR,
	ICE_PROTOCOL_LAST
};
@ -68,6 +70,10 @@ enum ice_sw_tunnel_type {
	ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN
			 * and GENEVE
			 */
+	ICE_SW_IPV4_TCP,
+	ICE_SW_IPV4_UDP,
+	ICE_SW_IPV6_TCP,
+	ICE_SW_IPV6_UDP,
	ICE_SW_TUN_IPV4_GTP_IPV4_TCP,
	ICE_SW_TUN_IPV4_GTP_IPV4_UDP,
	ICE_SW_TUN_IPV4_GTP_IPV6_TCP,
@ -84,6 +90,8 @@ enum ice_sw_tunnel_type {
	ICE_SW_TUN_GTP_IPV4_UDP,
	ICE_SW_TUN_GTP_IPV6_TCP,
	ICE_SW_TUN_GTP_IPV6_UDP,
+	ICE_SW_TUN_GTPU,
+	ICE_SW_TUN_GTPC,
	ICE_SW_TUN_IPV4_GTPU_IPV4,
	ICE_SW_TUN_IPV4_GTPU_IPV6,
	ICE_SW_TUN_IPV6_GTPU_IPV4,
@ -117,8 +125,11 @@ enum ice_prot_id {
	ICE_PROT_MPLS_IL = 29,
	ICE_PROT_IPV4_OF_OR_S = 32,
	ICE_PROT_IPV4_IL = 33,
+	ICE_PROT_IPV4_IL_IL = 34,
	ICE_PROT_IPV6_OF_OR_S = 40,
	ICE_PROT_IPV6_IL = 41,
+	ICE_PROT_IPV6_IL_IL = 42,
+	ICE_PROT_IPV6_NEXT_PROTO = 43,
	ICE_PROT_IPV6_FRAG = 47,
	ICE_PROT_TCP_IL = 49,
	ICE_PROT_UDP_OF = 52,
@ -147,10 +158,11 @@ enum ice_prot_id {
#define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */
+#define ICE_NAN_OFFSET 511
#define ICE_MAC_OFOS_HW 1
#define ICE_MAC_IL_HW 4
#define ICE_ETYPE_OL_HW 9
+#define ICE_ETYPE_IL_HW 10
#define ICE_VLAN_OF_HW 16
#define ICE_VLAN_OL_HW 17
#define ICE_IPV4_OFOS_HW 32
@ -171,12 +183,15 @@ enum ice_prot_id {
 */
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
#define ICE_GRE_OF_HW 64 /* NVGRE */
+#define ICE_PPPOE_HW 103
#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
#define ICE_MDID_SIZE 2
-#define ICE_TUN_FLAG_MDID 21
-#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
+#define ICE_TUN_FLAG_MDID 20
+#define ICE_TUN_FLAG_MDID_OFF(word) \
+	(ICE_MDID_SIZE * (ICE_TUN_FLAG_MDID + (word)))
#define ICE_TUN_FLAG_MASK 0xFF
+#define ICE_DIR_FLAG_MASK 0x10
#define ICE_TUN_FLAG_VLAN_MASK 0x01
#define ICE_TUN_FLAG_FV_IND 2
@ -188,7 +203,6 @@ struct ice_protocol_entry {
	u8 protocol_id;
};
struct ice_ether_hdr {
	u8 dst_addr[ETH_ALEN];
	u8 src_addr[ETH_ALEN];
@ -205,8 +219,8 @@ struct ice_ether_vlan_hdr {
};
struct ice_vlan_hdr {
-	__be16 vlan;
	__be16 type;
+	__be16 vlan;
};
struct ice_ipv4_hdr {
@ -275,7 +289,6 @@ struct ice_udp_gtp_hdr {
	u8 qfi;
	u8 rsvrd;
};
struct ice_pppoe_hdr {
	u8 rsrvd_ver_type;
	u8 rsrvd_code;
@ -315,7 +328,6 @@ struct ice_nat_t_hdr {
	struct ice_esp_hdr esp;
};
struct ice_nvgre {
	__be16 flags;
	__be16 protocol;
File diff suppressed because it is too large

drivers/thirdparty/ice/ice_ptp.h vendored

@ -9,7 +9,7 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/ptp_classify.h>
 #include <linux/highuid.h>
+#include "kcompat_kthread.h"
 #include "ice_ptp_hw.h"
@ -20,6 +20,13 @@ enum ice_ptp_pin {
 	NUM_ICE_PTP_PIN
 };
+/* Main timer mode */
+enum ice_src_tmr_mode {
+	ICE_SRC_TMR_MODE_NANOSECONDS,
+	ICE_SRC_TMR_MODE_LOCKED,
+
+	NUM_ICE_SRC_TMR_MODE
+};
 #define ICE_E810T_SMA1_CTRL_MASK	(ICE_E810T_P1_SMA1_DIR_EN | \
 					 ICE_E810T_P1_SMA1_TX_EN)
@ -38,13 +45,26 @@ enum ice_e810t_ptp_pins {
 	NUM_E810T_PTP_PINS
 };
-#define ICE_SUBDEV_ID_E810_T	0x000E
-
-static inline bool ice_is_e810t(struct ice_hw *hw)
-{
-	return (hw->device_id == ICE_DEV_ID_E810C_SFP &&
-		hw->subsystem_device_id == ICE_SUBDEV_ID_E810_T);
-}
+enum ice_phy_rclk_pins {
+	ICE_C827_RCLKA_PIN,	/* SCL pin */
+	ICE_C827_RCLKB_PIN,	/* SDA pin */
+	ICE_C827_RCLK_PINS_NUM	/* number of pins */
+};
+
+#define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_C827_RCLK_PINS_NUM + \
+					  (_pin) + ZL_REF1P)
+
+#define E822_CGU_RCLK_PHY_PINS_NUM	1
+#define E822_CGU_RCLK_PIN_NAME		"NAC_CLK_SYNCE0_PN"
+
+#define ICE_CGU_IN_PIN_FAIL_FLAGS (ICE_AQC_GET_CGU_IN_CFG_STATUS_SCM_FAIL | \
+				   ICE_AQC_GET_CGU_IN_CFG_STATUS_CFM_FAIL | \
+				   ICE_AQC_GET_CGU_IN_CFG_STATUS_GST_FAIL | \
+				   ICE_AQC_GET_CGU_IN_CFG_STATUS_PFM_FAIL)
+
+#define ICE_DPLL_PIN_STATE_INVALID	"invalid"
+#define ICE_DPLL_PIN_STATE_VALIDATING	"validating"
+#define ICE_DPLL_PIN_STATE_VALID	"valid"
@ -53,6 +73,88 @@ struct ice_perout_channel {
 struct ice_perout_channel {
 	bool ena;
 	u64 start_time;
 };
/* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp
* is stored in a buffer of registers. Depending on the specific hardware,
* this buffer might be shared across multiple PHY ports.
*
* On transmit of a packet to be timestamped, software is responsible for
* selecting an open index. Hardware makes no attempt to lock or prevent
* re-use of an index for multiple packets.
*
* To handle this, timestamp indexes must be tracked by software to ensure
* that an index is not re-used for multiple transmitted packets. The
* structures and functions declared in this file track the available Tx
* register indexes, as well as provide storage for the SKB pointers.
*
* To allow multiple ports to access the shared register block independently,
* the blocks are split up so that indexes are assigned to each port based on
* hardware logical port number.
*/
/**
* struct ice_tx_tstamp - Tracking for a single Tx timestamp
* @skb: pointer to the SKB for this timestamp request
* @start: jiffies when the timestamp was first requested
* @cached_tstamp: last read timestamp
*
* This structure tracks a single timestamp request. The SKB pointer is
* provided when initiating a request. The start time is used to ensure that
* we discard old requests that were not fulfilled within a 2 second time
* window.
* Timestamp values in the PHY are read only and do not get cleared except at
* hardware reset or when a new timestamp value is captured. The cached_tstamp
* field is used to detect the case where a new timestamp has not yet been
* captured, ensuring that we avoid sending stale timestamp data to the stack.
*/
struct ice_tx_tstamp {
struct sk_buff *skb;
unsigned long start;
u64 cached_tstamp;
};
/**
* struct ice_ptp_tx - Tracking structure for Tx timestamp requests on a port
* @lock: lock to prevent concurrent access to in_use and unread bitmaps
* @tstamps: array of len to store outstanding requests
* @in_use: bitmap of len to indicate which slots are in use
* @unread: bitmap of len to indicate which slots haven't been read
* @block: which memory block (quad or port) the timestamps are captured in
* @offset: offset into timestamp block to get the real index
* @len: length of the tstamps and in_use fields.
* @init: if true, the tracker is initialized;
* @calibrating: if true, the PHY is calibrating the Tx offset. During this
* window, timestamps are temporarily disabled.
* @ll_ena: if true, the low latency timestamping feature is supported
*
* The in_use and unread bitmaps work in concert. The in_use bitmap indicates
* which slots are currently being used by hardware to capture a Tx timestamp.
* The unread bit indicates that a slot has not had its Tx timestamp read by
* software. Both bits should be set by software under lock when initiating
* a Tx timestamp request using a slot. The unread bit is used to ensure that
* only one thread reads the Tx timestamp registers. It should be tested and
* cleared under lock before reading the Tx timestamp. The in_use bit should
* be cleared under lock only after a timestamp has completed. The separation
* of the in_use and unread bits is required because we cannot hold the
* spinlock while reading the Tx timestamp register from firmware.
*/
struct ice_ptp_tx {
spinlock_t lock; /* protects access to in_use bitmap */
struct ice_tx_tstamp *tstamps;
unsigned long *in_use;
unsigned long *unread;
u8 block;
u8 offset;
u8 len;
u8 init;
u8 calibrating;
u8 ll_ena;
};
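/* Sketch only (not the driver's ice_ptp_request_ts()): the slot
 * discipline the comment above describes. A transmit path claims a free
 * slot with both bits set under the lock; the completion path would
 * later test_and_clear_bit() the unread bit under the lock and only
 * then read the timestamp register, clearing in_use once done.
 */
static inline s8 example_claim_tstamp_slot(struct ice_ptp_tx *tx)
{
	unsigned long idx;

	spin_lock(&tx->lock);
	idx = find_first_zero_bit(tx->in_use, tx->len);
	if (idx < tx->len) {
		set_bit(idx, tx->in_use);
		set_bit(idx, tx->unread);
	}
	spin_unlock(&tx->lock);

	/* tx->offset maps the slot to its real index in the block */
	return idx < tx->len ? (s8)(tx->offset + idx) : -1;
}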
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT_E822 16
#define INDEX_PER_PORT_E810 64
#define INDEX_PER_PORT_ETH56G 64
@ -61,22 +163,16 @@ struct ice_perout_channel {
 /**
  * struct ice_ptp_port - data used to initialize an external port for PTP
  * ready for PTP functionality. It is used to track the port initialization
  * and determine when the port's PHY offset is valid.
  *
- * @ov_task: work task for tracking when PHY offset is valid
- * @tx_offset_ready: indicates the Tx offset for the port is ready
- * @rx_offset_ready: indicates the Rx offset for the port is ready
- * @tx_offset_lock: lock used to protect the tx_offset_ready field
- * @rx_offset_lock: lock used to protect the rx_offset_ready field
+ * @tx: Tx timestamp tracking for this port
+ * @ov_work: delayed work task for tracking when PHY offset is valid
  * @ps_lock: mutex used to protect the overall PTP PHY start procedure
  * @link_up: indicates whether the link is up
  * @tx_fifo_busy_cnt: number of times the Tx FIFO was busy
  * @port_num: the port number this structure represents
  */
 struct ice_ptp_port {
-	struct work_struct ov_task;
-	atomic_t tx_offset_ready;
-	atomic_t rx_offset_ready;
-	atomic_t tx_offset_lock;
-	atomic_t rx_offset_lock;
+	struct ice_ptp_tx tx;
+	struct kthread_delayed_work ov_work;
 	struct mutex ps_lock; /* protects overall PTP PHY start procedure */
 	bool link_up;
 	u8 tx_fifo_busy_cnt;
@ -88,40 +184,63 @@ struct ice_ptp_port {
 /**
  * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
  * @port: data for the PHY port initialization procedure
+ * @work: delayed work function for periodic tasks
+ * @extts_work: work function for handling external Tx timestamps
  * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
  * @ext_ts_chan: the external timestamp channel in use
  * @ext_ts_irq: the external timestamp IRQ in use
- * @phy_reset_lock: bit lock for preventing PHY start while resetting
- * @ov_wq: work queue for the offset validity task
+ * @kworker: kwork thread for handling periodic work
  * @perout_channels: periodic output data
  * @info: structure defining PTP hardware capabilities
  * @clock: pointer to registered PTP clock device
  * @tstamp_config: hardware timestamping configuration
- * @time_ref_freq: current device timer frequency (for E822 devices)
+ * @phy_kobj: pointer to phy sysfs object
  * @src_tmr_mode: current device timer mode (locked or nanoseconds)
+ * @reset_time: kernel time after clock stop on reset
+ * @tx_hwtstamp_skipped: number of Tx time stamp requests skipped
+ * @tx_hwtstamp_timeouts: number of Tx skbs discarded with no time stamp
+ * @tx_hwtstamp_flushed: number of Tx skbs flushed due to interface closed
+ * @tx_hwtstamp_discarded: number of Tx skbs discarded due to cached PHC time
+ *                        being too old to correctly extend timestamp
+ * @late_cached_phc_updates: number of times cached PHC update is late
  */
 struct ice_ptp {
 	struct ice_ptp_port port;
+	struct kthread_delayed_work work;
+	struct kthread_work extts_work;
 	u64 cached_phc_time;
+	unsigned long cached_phc_jiffies;
 	u8 ext_ts_chan;
 	u8 ext_ts_irq;
-	atomic_t phy_reset_lock;
-	struct workqueue_struct *ov_wq;
+	struct kthread_worker *kworker;
 	struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX];
 	struct ptp_clock_info info;
 	struct ptp_clock *clock;
 	struct hwtstamp_config tstamp_config;
-	enum ice_time_ref_freq time_ref_freq;
+	struct kobject *phy_kobj;
 	enum ice_src_tmr_mode src_tmr_mode;
+	u64 reset_time;
+	u32 tx_hwtstamp_skipped;
+	u32 tx_hwtstamp_timeouts;
+	u32 tx_hwtstamp_flushed;
+	u32 tx_hwtstamp_discarded;
+	u32 late_cached_phc_updates;
 };
-#define __ptp_port_to_ptp(p) \
-	container_of((p), struct ice_ptp, port)
+static inline struct ice_ptp *__ptp_port_to_ptp(struct ice_ptp_port *p)
+{
+	return container_of(p, struct ice_ptp, port);
+}
 #define ptp_port_to_pf(p) \
 	container_of(__ptp_port_to_ptp((p)), struct ice_pf, ptp)
-#define __ptp_info_to_ptp(i) \
-	container_of((i), struct ice_ptp, info)
+static inline struct ice_ptp *__ptp_info_to_ptp(struct ptp_clock_info *i)
+{
+	return container_of(i, struct ice_ptp, info);
+}
 #define ptp_info_to_pf(i) \
 	container_of(__ptp_info_to_ptp((i)), struct ice_pf, ptp)
@ -138,9 +257,6 @@ struct ice_ptp {
 #define FIFO_EMPTY	BIT(2)
 #define FIFO_OK	0xFF
 #define ICE_PTP_FIFO_NUM_CHECKS	5
-
-/* PHY, quad and port definitions */
-#define INDEX_PER_QUAD	64
-#define INDEX_PER_PORT	(INDEX_PER_QUAD / ICE_PORTS_PER_QUAD)
 #define TX_INTR_QUAD_MASK	0x03
 /* Per-channel register definitions */
 #define GLTSYN_AUX_OUT(_chan, _idx)	(GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
@ -157,34 +273,68 @@ struct ice_ptp {
 #define PPS_CLK_SRC_CHAN	2
 #define PPS_PIN_INDEX	5
 #define TIME_SYNC_PIN_INDEX	4
-#define E810_N_EXT_TS	3
-#define E810_N_PER_OUT	4
-#define E810T_N_PER_OUT	3
+#define N_EXT_TS_E810	3
+#define N_PER_OUT_E810	4
+#define N_PER_OUT_E810T	3
+#define N_PER_OUT_NO_SMA_E810T	2
+#define N_EXT_TS_NO_SMA_E810T	2
 /* Macros to derive the low and high addresses for PHY */
 #define LOWER_ADDR_SIZE	16
 /* Macros to derive offsets for TimeStampLow and TimeStampHigh */
 #define PORT_TIMER_ASSOC(_i)	(0x0300102C + ((_i) * 256))
 #define ETH_GLTSYN_ENA(_i)	(0x03000348 + ((_i) * 4))
+#define MAX_PIN_NAME	15
+
+#define ICE_PTP_PIN_FREQ_1HZ	1
+#define ICE_PTP_PIN_FREQ_10MHZ	10000000
 /* Time allowed for programming periodic clock output */
 #define START_OFFS_NS 100000000
#define ICE_PTP_PIN_INVALID 0xFF
/* "dpll <x> pin <y> prio <z>" (always 6 arguments) */
#define ICE_PTP_PIN_PRIO_ARG_CNT 6
/*
* Examples of possible argument lists and count:
* "in pin <n> enable <0/1>"
* "out pin <n> enable <0/1> freq <x>"
* "in pin <n> freq <x>"
* "out pin <n> freq <x> esync <z>"
* "in pin <n> freq <x> phase_delay <y> esync <0/1>"
* "out pin <n> enable <0/1> freq <x> phase_delay <y> esync <0/1>"
*
* count = 3 + x * 2
* 3 = target pin arguments (<dir> pin <n>)
* x = int [1-4] (up to 4: 'param name' + 'value' pairs)
* 2 = count of args in pair ('param name' + 'value')
*/
#define ICE_PTP_PIN_CFG_1_ARG_CNT 5
#define ICE_PTP_PIN_CFG_2_ARG_CNT 7
#define ICE_PTP_PIN_CFG_3_ARG_CNT 9
#define ICE_PTP_PIN_CFG_4_ARG_CNT 11
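/* For example, "out pin 1 enable 1 freq 10000000" is the 3 target-pin
 * arguments plus two 'param name' + 'value' pairs: 3 + 2 * 2 = 7 args,
 * i.e. ICE_PTP_PIN_CFG_2_ARG_CNT.
 */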
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
-int ice_ptp_get_ts_idx(struct ice_vsi *vsi);
+void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
int ice_get_ptp_clock_index(struct ice_pf *pf);
-void ice_clean_ptp_subtask(struct ice_pf *pf);
-void ice_ptp_set_timestamp_offsets(struct ice_pf *pf);
+s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
+bool ice_ptp_process_ts(struct ice_pf *pf);
u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts);
void ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
			 struct sk_buff *skb);
+void ice_ptp_reset(struct ice_pf *pf);
+void ice_ptp_prepare_for_reset(struct ice_pf *pf);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
-int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
+void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
int ice_ptp_check_rx_fifo(struct ice_pf *pf, u8 port);
int ptp_ts_enable(struct ice_pf *pf, u8 port, bool enable);
int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
@ -193,6 +343,7 @@ int ice_ptp_update_incval(struct ice_pf *pf, enum ice_time_ref_freq time_ref_freq,
			  enum ice_src_tmr_mode src_tmr_mode);
int ice_ptp_get_incval(struct ice_pf *pf, enum ice_time_ref_freq *time_ref_freq,
		       enum ice_src_tmr_mode *src_tmr_mode);
+void ice_dpll_pin_idx_to_name(struct ice_pf *pf, u8 pin, char *pin_name);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf __always_unused *pf,
					struct ifreq __always_unused *ifr)
@ -206,6 +357,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf __always_unused *pf,
{
	return 0;
}
+static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { }
static inline int
ice_ptp_check_rx_fifo(struct ice_pf __always_unused *pf,
		      u8 __always_unused port)
@ -213,9 +365,14 @@
{
	return 0;
}
-static inline int ice_ptp_get_ts_idx(struct ice_vsi __always_unused *vsi)
-{
-	return 0;
-}
+static inline s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
+{
+	return -1;
+}
+
+static inline bool ice_ptp_process_ts(struct ice_pf *pf)
+{
+	return true;
+}
static inline int ice_get_ptp_clock_index(struct ice_pf __always_unused *pf)
@ -223,13 +380,15 @@ static inline int ice_get_ptp_clock_index(struct ice_pf __always_unused *pf)
{
	return 0;
}
static inline void ice_clean_ptp_subtask(struct ice_pf *pf) { }
-static inline void ice_ptp_set_timestamp_offsets(struct ice_pf *pf) { }
static inline void ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
				       union ice_32b_rx_flex_desc *rx_desc,
				       struct sk_buff *skb) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
+static inline void ice_ptp_reset(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
-static inline int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
-{ return 0; }
+static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
+static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+{
+}
#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
#endif /* _ICE_PTP_H_ */

drivers/thirdparty/ice/ice_ptp_consts.h vendored

@ -82,4 +82,295 @@ const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
	},
};
const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
{
/* refclk_pre_div */
1,
/* feedback_div */
197,
/* frac_n_div */
2621440,
/* post_pll_div */
6,
},
/* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
{
/* refclk_pre_div */
5,
/* feedback_div */
223,
/* frac_n_div */
524288,
/* post_pll_div */
7,
},
/* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
{
/* refclk_pre_div */
5,
/* feedback_div */
223,
/* frac_n_div */
524288,
/* post_pll_div */
7,
},
/* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
{
/* refclk_pre_div */
5,
/* feedback_div */
159,
/* frac_n_div */
1572864,
/* post_pll_div */
6,
},
/* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
{
/* refclk_pre_div */
5,
/* feedback_div */
159,
/* frac_n_div */
1572864,
/* post_pll_div */
6,
},
/* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
{
/* refclk_pre_div */
10,
/* feedback_div */
223,
/* frac_n_div */
524288,
/* post_pll_div */
7,
},
};
/*
* struct ice_vernier_info_e822
*
* E822 hardware calibrates the delay of the timestamp indication from the
* actual packet transmission or reception during the initialization of the
* PHY. To do this, the hardware mechanism uses some conversions between the
* various clocks within the PHY block. This table defines constants used to
* calculate the correct conversion ratios in the PHY registers.
*
* Many of the values relate to the PAR/PCS clock conversion registers. For
* these registers, a value of 0 means that the associated register is not
* used by this link speed, and that the register should be cleared by writing
* 0. Other values specify the clock frequency in Hz.
*/
const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
/* ICE_PTP_LNK_SPD_1G */
{
/* tx_par_clk */
31250000, /* 31.25 MHz */
/* rx_par_clk */
31250000, /* 31.25 MHz */
/* tx_pcs_clk */
125000000, /* 125 MHz */
/* rx_pcs_clk */
125000000, /* 125 MHz */
/* tx_desk_rsgb_par */
0, /* unused */
/* rx_desk_rsgb_par */
0, /* unused */
/* tx_desk_rsgb_pcs */
0, /* unused */
/* rx_desk_rsgb_pcs */
0, /* unused */
/* tx_fixed_delay */
25140,
/* pmd_adj_divisor */
10000000,
/* rx_fixed_delay */
17372,
},
/* ICE_PTP_LNK_SPD_10G */
{
/* tx_par_clk */
257812500, /* 257.8125 MHz */
/* rx_par_clk */
257812500, /* 257.8125 MHz */
/* tx_pcs_clk */
156250000, /* 156.25 MHz */
/* rx_pcs_clk */
156250000, /* 156.25 MHz */
/* tx_desk_rsgb_par */
0, /* unused */
/* rx_desk_rsgb_par */
0, /* unused */
/* tx_desk_rsgb_pcs */
0, /* unused */
/* rx_desk_rsgb_pcs */
0, /* unused */
/* tx_fixed_delay */
6938,
/* pmd_adj_divisor */
82500000,
/* rx_fixed_delay */
6212,
},
/* ICE_PTP_LNK_SPD_25G */
{
/* tx_par_clk */
644531250, /* 644.53125 MHZ */
/* rx_par_clk */
644531250, /* 644.53125 MHz */
/* tx_pcs_clk */
390625000, /* 390.625 MHz */
/* rx_pcs_clk */
390625000, /* 390.625 MHz */
/* tx_desk_rsgb_par */
0, /* unused */
/* rx_desk_rsgb_par */
0, /* unused */
/* tx_desk_rsgb_pcs */
0, /* unused */
/* rx_desk_rsgb_pcs */
0, /* unused */
/* tx_fixed_delay */
2778,
/* pmd_adj_divisor */
206250000,
/* rx_fixed_delay */
2491,
},
/* ICE_PTP_LNK_SPD_25G_RS */
{
/* tx_par_clk */
0, /* unused */
/* rx_par_clk */
0, /* unused */
/* tx_pcs_clk */
0, /* unused */
/* rx_pcs_clk */
0, /* unused */
/* tx_desk_rsgb_par */
161132812, /* 161.1328125 MHz Reed Solomon gearbox */
/* rx_desk_rsgb_par */
161132812, /* 161.1328125 MHz Reed Solomon gearbox */
/* tx_desk_rsgb_pcs */
97656250, /* 97.65625 MHz Reed Solomon gearbox */
/* rx_desk_rsgb_pcs */
97656250, /* 97.65625 MHz Reed Solomon gearbox */
/* tx_fixed_delay */
3928,
/* pmd_adj_divisor */
206250000,
/* rx_fixed_delay */
29535,
},
/* ICE_PTP_LNK_SPD_40G */
{
/* tx_par_clk */
257812500,
/* rx_par_clk */
257812500,
/* tx_pcs_clk */
156250000, /* 156.25 MHz */
/* rx_pcs_clk */
156250000, /* 156.25 MHz */
/* tx_desk_rsgb_par */
0, /* unused */
/* rx_desk_rsgb_par */
156250000, /* 156.25 MHz deskew clock */
/* tx_desk_rsgb_pcs */
0, /* unused */
/* rx_desk_rsgb_pcs */
156250000, /* 156.25 MHz deskew clock */
/* tx_fixed_delay */
5666,
/* pmd_adj_divisor */
82500000,
/* rx_fixed_delay */
4244,
},
/* ICE_PTP_LNK_SPD_50G */
{
/* tx_par_clk */
644531250, /* 644.53125 MHZ */
/* rx_par_clk */
644531250, /* 644.53125 MHZ */
/* tx_pcs_clk */
390625000, /* 390.625 MHz */
/* rx_pcs_clk */
390625000, /* 390.625 MHz */
/* tx_desk_rsgb_par */
0, /* unused */
/* rx_desk_rsgb_par */
195312500, /* 195.3125 MHz deskew clock */
/* tx_desk_rsgb_pcs */
0, /* unused */
/* rx_desk_rsgb_pcs */
195312500, /* 195.3125 MHz deskew clock */
/* tx_fixed_delay */
2778,
/* pmd_adj_divisor */
206250000,
/* rx_fixed_delay */
2868,
},
/* ICE_PTP_LNK_SPD_50G_RS */
{
/* tx_par_clk */
0, /* unused */
/* rx_par_clk */
644531250, /* 644.53125 MHz */
/* tx_pcs_clk */
0, /* unused */
/* rx_pcs_clk */
644531250, /* 644.53125 MHz */
/* tx_desk_rsgb_par */
322265625, /* 322.265625 MHz Reed Solomon gearbox */
/* rx_desk_rsgb_par */
322265625, /* 322.265625 MHz Reed Solomon gearbox */
/* tx_desk_rsgb_pcs */
644531250, /* 644.53125 MHz Reed Solomon gearbox */
/* rx_desk_rsgb_pcs */
644531250, /* 644.53125 MHz Reed Solomon gearbox */
/* tx_fixed_delay */
2095,
/* pmd_adj_divisor */
206250000,
/* rx_fixed_delay */
14524,
},
/* ICE_PTP_LNK_SPD_100G_RS */
{
/* tx_par_clk */
0, /* unused */
/* rx_par_clk */
644531250, /* 644.53125 MHz */
/* tx_pcs_clk */
0, /* unused */
/* rx_pcs_clk */
644531250, /* 644.53125 MHz */
/* tx_desk_rsgb_par */
644531250, /* 644.53125 MHz Reed Solomon gearbox */
/* rx_desk_rsgb_par */
644531250, /* 644.53125 MHz Reed Solomon gearbox */
/* tx_desk_rsgb_pcs */
644531250, /* 644.53125 MHz Reed Solomon gearbox */
/* rx_desk_rsgb_pcs */
644531250, /* 644.53125 MHz Reed Solomon gearbox */
/* tx_fixed_delay */
1620,
/* pmd_adj_divisor */
206250000,
/* rx_fixed_delay */
7775,
},
};
#endif /* _ICE_PTP_CONSTS_H_ */

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff