From a2936f347ec7929e5b81dc37d6efa5bc077ce015 Mon Sep 17 00:00:00 2001
From: Jinliang Zheng
Date: Sun, 28 Apr 2024 16:57:20 +0800
Subject: [PATCH] ice: update thirdparty ice nic driver to 1.10.1.2

Conflicts:
	drivers/thirdparty/ice/ice_ethtool.c

Signed-off-by: Jinliang Zheng
Reviewed-by: mengensun
Signed-off-by: Jianping Liu
---
 drivers/thirdparty/Kconfig | 1 +
 drivers/thirdparty/ice/Makefile | 92 +-
 drivers/thirdparty/ice/ice.h | 489 +-
 drivers/thirdparty/ice/ice_acl.c | 79 +-
 drivers/thirdparty/ice/ice_acl.h | 50 +-
 drivers/thirdparty/ice/ice_acl_ctrl.c | 111 +-
 drivers/thirdparty/ice/ice_acl_main.c | 36 +-
 drivers/thirdparty/ice/ice_adminq_cmd.h | 634 +-
 drivers/thirdparty/ice/ice_arfs.c | 20 +-
 drivers/thirdparty/ice/ice_arfs.h | 4 +-
 drivers/thirdparty/ice/ice_base.c | 145 +-
 drivers/thirdparty/ice/ice_base.h | 2 +-
 drivers/thirdparty/ice/ice_bst_tcam.c | 297 +
 drivers/thirdparty/ice/ice_bst_tcam.h | 34 +
 drivers/thirdparty/ice/ice_cgu.h | 231 -
 drivers/thirdparty/ice/ice_cgu_ops.c | 248 -
 drivers/thirdparty/ice/ice_cgu_ops.h | 121 -
 drivers/thirdparty/ice/ice_cgu_regs.h | 825 --
 drivers/thirdparty/ice/ice_cgu_util.c | 444 -
 drivers/thirdparty/ice/ice_cgu_util.h | 46 -
 drivers/thirdparty/ice/ice_common.c | 1954 +++--
 drivers/thirdparty/ice/ice_common.h | 249 +-
 drivers/thirdparty/ice/ice_controlq.c | 159 +-
 drivers/thirdparty/ice/ice_controlq.h | 1 -
 drivers/thirdparty/ice/ice_dcb.c | 178 +-
 drivers/thirdparty/ice/ice_dcb.h | 44 +-
 drivers/thirdparty/ice/ice_dcb_lib.c | 217 +-
 drivers/thirdparty/ice/ice_dcb_lib.h | 16 +-
 drivers/thirdparty/ice/ice_dcb_nl.c | 79 +-
 drivers/thirdparty/ice/ice_dcb_nl.h | 6 +-
 drivers/thirdparty/ice/ice_dcf.c | 127 +-
 drivers/thirdparty/ice/ice_dcf.h | 4 +-
 drivers/thirdparty/ice/ice_ddp.c | 2586 ++++++
 drivers/thirdparty/ice/ice_ddp.h | 466 ++
 drivers/thirdparty/ice/ice_debugfs.c | 305 +-
 drivers/thirdparty/ice/ice_defs.h | 12 +
 drivers/thirdparty/ice/ice_devids.h | 18 +-
 drivers/thirdparty/ice/ice_devlink.c | 1569 +++-
 drivers/thirdparty/ice/ice_devlink.h | 23 +-
 drivers/thirdparty/ice/ice_eswitch.c | 400 +-
 drivers/thirdparty/ice/ice_eswitch.h | 23 +-
 drivers/thirdparty/ice/ice_ethtool.c | 1160 ++-
 drivers/thirdparty/ice/ice_ethtool.h | 334 +-
 drivers/thirdparty/ice/ice_ethtool_fdir.c | 112 +-
 drivers/thirdparty/ice/ice_fdir.c | 3407 +++++++-
 drivers/thirdparty/ice/ice_fdir.h | 63 +-
 drivers/thirdparty/ice/ice_flex_pipe.c | 2806 ++-----
 drivers/thirdparty/ice/ice_flex_pipe.h | 98 +-
 drivers/thirdparty/ice/ice_flex_type.h | 421 +-
 drivers/thirdparty/ice/ice_flg_rd.c | 75 +
 drivers/thirdparty/ice/ice_flg_rd.h | 16 +
 drivers/thirdparty/ice/ice_flow.c | 801 +-
 drivers/thirdparty/ice/ice_flow.h | 74 +-
 drivers/thirdparty/ice/ice_fltr.c | 351 +-
 drivers/thirdparty/ice/ice_fltr.h | 45 +-
 drivers/thirdparty/ice/ice_fw_update.c | 549 +-
 drivers/thirdparty/ice/ice_fw_update.h | 12 +-
 drivers/thirdparty/ice/ice_fwlog.c | 62 +-
 drivers/thirdparty/ice/ice_fwlog.h | 14 +-
 drivers/thirdparty/ice/ice_gnss.c | 575 ++
 drivers/thirdparty/ice/ice_gnss.h | 62 +
 drivers/thirdparty/ice/ice_hw_autogen.h | 150 +-
 drivers/thirdparty/ice/ice_idc.c | 1812 ++---
 drivers/thirdparty/ice/ice_idc.h | 422 -
 drivers/thirdparty/ice/ice_idc_int.h | 173 +-
 drivers/thirdparty/ice/ice_ieps.c | 1032 +++
 drivers/thirdparty/ice/ice_ieps.h | 14 +
 drivers/thirdparty/ice/ice_imem.c | 250 +
 drivers/thirdparty/ice/ice_imem.h | 108 +
 drivers/thirdparty/ice/ice_irq.c | 377 +
 drivers/thirdparty/ice/ice_irq.h | 12 +
 drivers/thirdparty/ice/ice_lag.c | 1535 +++-
 drivers/thirdparty/ice/ice_lag.h | 85 +-
 drivers/thirdparty/ice/ice_lan_tx_rx.h | 37 +-
 drivers/thirdparty/ice/ice_lib.c | 1170 ++-
 drivers/thirdparty/ice/ice_lib.h | 26 +-
 drivers/thirdparty/ice/ice_main.c | 2927 ++++---
 drivers/thirdparty/ice/ice_metainit.c | 155 +
 drivers/thirdparty/ice/ice_metainit.h | 45 +
 drivers/thirdparty/ice/ice_migration.c | 25 +
 drivers/thirdparty/ice/ice_migration.h | 18 +
 drivers/thirdparty/ice/ice_mk_grp.c | 54 +
 drivers/thirdparty/ice/ice_mk_grp.h | 14 +
 drivers/thirdparty/ice/ice_nvm.c | 296 +-
 drivers/thirdparty/ice/ice_nvm.h | 58 +-
 drivers/thirdparty/ice/ice_osdep.h | 56 +-
 drivers/thirdparty/ice/ice_parser.c | 595 ++
 drivers/thirdparty/ice/ice_parser.h | 114 +
 drivers/thirdparty/ice/ice_parser_rt.c | 864 ++
 drivers/thirdparty/ice/ice_parser_rt.h | 47 +
 drivers/thirdparty/ice/ice_parser_util.h | 35 +
 drivers/thirdparty/ice/ice_pg_cam.c | 376 +
 drivers/thirdparty/ice/ice_pg_cam.h | 73 +
 drivers/thirdparty/ice/ice_proto_grp.c | 106 +
 drivers/thirdparty/ice/ice_proto_grp.h | 23 +
 drivers/thirdparty/ice/ice_protocol_type.h | 28 +-
 drivers/thirdparty/ice/ice_ptp.c | 3963 +++++----
 drivers/thirdparty/ice/ice_ptp.h | 243 +-
 drivers/thirdparty/ice/ice_ptp_consts.h | 291 +
 drivers/thirdparty/ice/ice_ptp_hw.c | 3158 +++++++-
 drivers/thirdparty/ice/ice_ptp_hw.h | 283 +-
 drivers/thirdparty/ice/ice_ptype_mk.c | 75 +
 drivers/thirdparty/ice/ice_ptype_mk.h | 20 +
 drivers/thirdparty/ice/ice_repr.c | 198 +-
 drivers/thirdparty/ice/ice_repr.h | 4 +-
 drivers/thirdparty/ice/ice_sched.c | 758 +-
 drivers/thirdparty/ice/ice_sched.h | 101 +-
 drivers/thirdparty/ice/ice_sched_cfg.h | 26 +
 drivers/thirdparty/ice/ice_siov.c | 1236 +++
 drivers/thirdparty/ice/ice_siov.h | 18 +
 drivers/thirdparty/ice/ice_sriov.c | 2632 ++--
 drivers/thirdparty/ice/ice_sriov.h | 248 +-
 drivers/thirdparty/ice/ice_status.h | 47 -
 drivers/thirdparty/ice/ice_switch.c | 2368 ++--
 drivers/thirdparty/ice/ice_switch.h | 176 +-
 drivers/thirdparty/ice/ice_tc_lib.c | 2049 +++--
 drivers/thirdparty/ice/ice_tc_lib.h | 74 +-
 drivers/thirdparty/ice/ice_tmatch.h | 43 +
 drivers/thirdparty/ice/ice_trace.h | 32 +-
 drivers/thirdparty/ice/ice_txrx.c | 567 +-
 drivers/thirdparty/ice/ice_txrx.h | 82 +-
 drivers/thirdparty/ice/ice_txrx_lib.c | 20 +-
 drivers/thirdparty/ice/ice_txrx_lib.h | 2 +-
 drivers/thirdparty/ice/ice_type.h | 346 +-
 drivers/thirdparty/ice/ice_vdcm.c | 1563 ++++
 drivers/thirdparty/ice/ice_vdcm.h | 117 +
 drivers/thirdparty/ice/ice_vf_adq.c | 1469 ++++
 drivers/thirdparty/ice/ice_vf_adq.h | 26 +
 drivers/thirdparty/ice/ice_vf_lib.c | 1525 ++++
 drivers/thirdparty/ice/ice_vf_lib.h | 415 +
 drivers/thirdparty/ice/ice_vf_lib_private.h | 46 +
 drivers/thirdparty/ice/ice_vf_mbx.c | 534 ++
 drivers/thirdparty/ice/ice_vf_mbx.h | 52 +
 drivers/thirdparty/ice/ice_vf_vsi_vlan_ops.c | 54 +-
 drivers/thirdparty/ice/ice_vfio_pci.c | 731 ++
 .../ice/{ice_virtchnl_pf.c => ice_virtchnl.c} | 7146 +++++------------
 drivers/thirdparty/ice/ice_virtchnl.h | 153 +
 .../thirdparty/ice/ice_virtchnl_allowlist.c | 15 +-
 .../thirdparty/ice/ice_virtchnl_allowlist.h | 2 +-
 drivers/thirdparty/ice/ice_virtchnl_fdir.c | 2258 +++++-
 drivers/thirdparty/ice/ice_virtchnl_fdir.h | 5 +-
 drivers/thirdparty/ice/ice_virtchnl_fsub.c | 865 ++
 drivers/thirdparty/ice/ice_virtchnl_fsub.h | 25 +
 drivers/thirdparty/ice/ice_virtchnl_pf.h | 560 --
 drivers/thirdparty/ice/ice_vlan.h | 2 +
 drivers/thirdparty/ice/ice_vlan_mode.c | 43 +-
 drivers/thirdparty/ice/ice_vlan_mode.h | 2 +-
 drivers/thirdparty/ice/ice_vsi_vlan_lib.c | 179 +-
 drivers/thirdparty/ice/ice_vsi_vlan_lib.h | 3 -
 drivers/thirdparty/ice/ice_vsi_vlan_ops.c | 1 +
 drivers/thirdparty/ice/ice_xlt_kb.c | 221 +
 drivers/thirdparty/ice/ice_xlt_kb.h | 33 +
 drivers/thirdparty/ice/ice_xsk.c | 106 +-
 drivers/thirdparty/ice/ice_xsk.h | 10 +-
 drivers/thirdparty/ice/idc_generic.h | 191 +
 drivers/thirdparty/ice/ieps_peer.h | 282 +
 drivers/thirdparty/ice/iidc.h | 278 +
 drivers/thirdparty/ice/kcompat.c | 263 +-
 drivers/thirdparty/ice/kcompat.h | 300 +-
 drivers/thirdparty/ice/kcompat_defs.h | 39 +
 drivers/thirdparty/ice/kcompat_gcc.h | 17 +
 drivers/thirdparty/ice/kcompat_impl.h | 873 ++
 drivers/thirdparty/ice/kcompat_kthread.h | 48 +
 drivers/thirdparty/ice/kcompat_net_dim.c | 42 +-
 drivers/thirdparty/ice/kcompat_oracle_defs.h | 43 +
 drivers/thirdparty/ice/kcompat_pldmfw.c | 24 +-
 drivers/thirdparty/ice/kcompat_rhel_defs.h | 133 +-
 drivers/thirdparty/ice/kcompat_sles_defs.h | 80 +-
 drivers/thirdparty/ice/kcompat_std_defs.h | 230 +-
 drivers/thirdparty/ice/kcompat_ubuntu_defs.h | 7 +
 drivers/thirdparty/ice/siov_regs.h | 41 +
 drivers/thirdparty/ice/virtchnl.h | 476 +-
 .../thirdparty/ice/virtchnl_inline_ipsec.h | 548 --
 drivers/thirdparty/ice/virtchnl_lan_desc.h | 528 --
 174 files changed, 53563 insertions(+), 24595 deletions(-)
 create mode 100644 drivers/thirdparty/ice/ice_bst_tcam.c
 create mode 100644 drivers/thirdparty/ice/ice_bst_tcam.h
 delete mode 100644 drivers/thirdparty/ice/ice_cgu.h
 delete mode 100644 drivers/thirdparty/ice/ice_cgu_ops.c
 delete mode 100644 drivers/thirdparty/ice/ice_cgu_ops.h
 delete mode 100644 drivers/thirdparty/ice/ice_cgu_util.c
 delete mode 100644 drivers/thirdparty/ice/ice_cgu_util.h
 create mode 100644 drivers/thirdparty/ice/ice_ddp.c
 create mode 100644 drivers/thirdparty/ice/ice_ddp.h
 create mode 100644 drivers/thirdparty/ice/ice_defs.h
 create mode 100644 drivers/thirdparty/ice/ice_flg_rd.c
 create mode 100644 drivers/thirdparty/ice/ice_flg_rd.h
 create mode 100644 drivers/thirdparty/ice/ice_gnss.c
 create mode 100644 drivers/thirdparty/ice/ice_gnss.h
 delete mode 100644 drivers/thirdparty/ice/ice_idc.h
 create mode 100644 drivers/thirdparty/ice/ice_ieps.c
 create mode 100644 drivers/thirdparty/ice/ice_ieps.h
 create mode 100644 drivers/thirdparty/ice/ice_imem.c
 create mode 100644 drivers/thirdparty/ice/ice_imem.h
 create mode 100644 drivers/thirdparty/ice/ice_irq.c
 create mode 100644 drivers/thirdparty/ice/ice_irq.h
 create mode 100644 drivers/thirdparty/ice/ice_metainit.c
 create mode 100644 drivers/thirdparty/ice/ice_metainit.h
 create mode 100644 drivers/thirdparty/ice/ice_migration.c
 create mode 100644 drivers/thirdparty/ice/ice_migration.h
 create mode 100644 drivers/thirdparty/ice/ice_mk_grp.c
 create mode 100644 drivers/thirdparty/ice/ice_mk_grp.h
 create mode 100644 drivers/thirdparty/ice/ice_parser.c
 create mode 100644 drivers/thirdparty/ice/ice_parser.h
 create mode 100644 drivers/thirdparty/ice/ice_parser_rt.c
 create mode 100644 drivers/thirdparty/ice/ice_parser_rt.h
 create mode 100644 drivers/thirdparty/ice/ice_parser_util.h
 create mode 100644 drivers/thirdparty/ice/ice_pg_cam.c
 create mode 100644 drivers/thirdparty/ice/ice_pg_cam.h
 create mode 100644 drivers/thirdparty/ice/ice_proto_grp.c
 create mode 100644 drivers/thirdparty/ice/ice_proto_grp.h
 create mode 100644 drivers/thirdparty/ice/ice_ptype_mk.c
 create mode 100644 drivers/thirdparty/ice/ice_ptype_mk.h
 create mode 100644 drivers/thirdparty/ice/ice_sched_cfg.h
 create mode 100644 drivers/thirdparty/ice/ice_siov.c
 create mode 100644 drivers/thirdparty/ice/ice_siov.h
 delete mode 100644 drivers/thirdparty/ice/ice_status.h
 create mode 100644
drivers/thirdparty/ice/ice_tmatch.h create mode 100644 drivers/thirdparty/ice/ice_vdcm.c create mode 100644 drivers/thirdparty/ice/ice_vdcm.h create mode 100644 drivers/thirdparty/ice/ice_vf_adq.c create mode 100644 drivers/thirdparty/ice/ice_vf_adq.h create mode 100644 drivers/thirdparty/ice/ice_vf_lib.c create mode 100644 drivers/thirdparty/ice/ice_vf_lib.h create mode 100644 drivers/thirdparty/ice/ice_vf_lib_private.h create mode 100644 drivers/thirdparty/ice/ice_vf_mbx.c create mode 100644 drivers/thirdparty/ice/ice_vf_mbx.h create mode 100644 drivers/thirdparty/ice/ice_vfio_pci.c rename drivers/thirdparty/ice/{ice_virtchnl_pf.c => ice_virtchnl.c} (53%) create mode 100644 drivers/thirdparty/ice/ice_virtchnl.h create mode 100644 drivers/thirdparty/ice/ice_virtchnl_fsub.c create mode 100644 drivers/thirdparty/ice/ice_virtchnl_fsub.h delete mode 100644 drivers/thirdparty/ice/ice_virtchnl_pf.h create mode 100644 drivers/thirdparty/ice/ice_xlt_kb.c create mode 100644 drivers/thirdparty/ice/ice_xlt_kb.h create mode 100644 drivers/thirdparty/ice/idc_generic.h create mode 100644 drivers/thirdparty/ice/ieps_peer.h create mode 100644 drivers/thirdparty/ice/iidc.h create mode 100644 drivers/thirdparty/ice/kcompat_defs.h create mode 100644 drivers/thirdparty/ice/kcompat_gcc.h create mode 100644 drivers/thirdparty/ice/kcompat_kthread.h create mode 100644 drivers/thirdparty/ice/kcompat_oracle_defs.h create mode 100644 drivers/thirdparty/ice/siov_regs.h delete mode 100644 drivers/thirdparty/ice/virtchnl_inline_ipsec.h delete mode 100644 drivers/thirdparty/ice/virtchnl_lan_desc.h diff --git a/drivers/thirdparty/Kconfig b/drivers/thirdparty/Kconfig index 0b8a6b0a90db..c7c96422bc39 100644 --- a/drivers/thirdparty/Kconfig +++ b/drivers/thirdparty/Kconfig @@ -63,6 +63,7 @@ config THIRDPARTY_ICE depends on X86 default n depends on PCI_MSI + select AUXILIARY_BUS ---help--- This driver supports Intel(R) Ethernet Connection E800 Series of devices. 
For more information on how to identify your adapter, go diff --git a/drivers/thirdparty/ice/Makefile b/drivers/thirdparty/ice/Makefile index bf38db0ba634..6199d065b578 100644 --- a/drivers/thirdparty/ice/Makefile +++ b/drivers/thirdparty/ice/Makefile @@ -10,47 +10,73 @@ subdir-ccflags-y += -I$(src) obj-m += ice.o -ice-y := ice_main.o \ - ice_controlq.o \ - ice_common.o \ - ice_nvm.o \ - ice_switch.o \ - ice_sched.o \ - ice_base.o \ - ice_lib.o \ - ice_txrx_lib.o \ - ice_txrx.o \ - ice_fltr.o \ - ice_pf_vsi_vlan_ops.o \ - ice_vsi_vlan_ops.o \ - ice_vsi_vlan_lib.o \ - ice_tc_lib.o \ - ice_fdir.o \ - ice_ethtool_fdir.o \ - ice_acl_main.o \ - ice_acl.o \ - ice_acl_ctrl.o \ - ice_vlan_mode.o \ - ice_flex_pipe.o \ - ice_flow.o \ - ice_lag.o \ - ice_fwlog.o \ - ice_ethtool.o \ - kcompat.o +ice-y := ice_main.o \ + ice_controlq.o \ + ice_common.o \ + ice_nvm.o \ + ice_switch.o \ + ice_sched.o \ + ice_base.o \ + ice_lib.o \ + ice_txrx_lib.o \ + ice_txrx.o \ + ice_fltr.o \ + ice_irq.o \ + ice_pf_vsi_vlan_ops.o \ + ice_vsi_vlan_ops.o \ + ice_vsi_vlan_lib.o \ + ice_gnss.o \ + ice_tc_lib.o \ + ice_fdir.o \ + ice_ethtool_fdir.o \ + ice_acl_main.o \ + ice_acl.o \ + ice_acl_ctrl.o \ + ice_vlan_mode.o \ + ice_ddp.o \ + ice_flex_pipe.o \ + ice_flow.o \ + ice_parser.o \ + ice_imem.o \ + ice_pg_cam.o \ + ice_metainit.o \ + ice_bst_tcam.o \ + ice_ptype_mk.o \ + ice_mk_grp.o \ + ice_proto_grp.o \ + ice_flg_rd.o \ + ice_xlt_kb.o \ + ice_parser_rt.o \ + ice_lag.o \ + ice_fwlog.o \ + ice_ieps.o \ + ice_ethtool.o ice-$(CONFIG_NET_DEVLINK:m=y) += ice_devlink.o ice_fw_update.o ice-$(CONFIG_NET_DEVLINK:m=y) += ice_eswitch.o ice_repr.o -ice-$(CONFIG_MFD_CORE:m=y) += ice_idc.o +ice-y += ice_idc.o ice-$(CONFIG_DEBUG_FS) += ice_debugfs.o -ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o -ice-$(CONFIG_PCI_IOV) += ice_dcf.o -ice-$(CONFIG_PCI_IOV) += ice_virtchnl_fdir.o -ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_vf_vsi_vlan_ops.o + +ice-$(CONFIG_PCI_IOV) += \ + ice_dcf.o \ + ice_sriov.o \ + ice_vf_mbx.o \ + ice_vf_vsi_vlan_ops.o \ + ice_virtchnl_allowlist.o \ + ice_vf_adq.o \ + ice_virtchnl.o \ + ice_virtchnl_fdir.o \ + ice_virtchnl_fsub.o \ + ice_vf_lib.o + +ifneq (${ENABLE_SIOV_SUPPORT},) +ice-$(CONFIG_VFIO_MDEV:m=y) += ice_vdcm.o ice_siov.o +endif ice-$(CONFIG_PTP_1588_CLOCK:m=y) += ice_ptp.o ice_ptp_hw.o -ice-$(CONFIG_PTP_1588_CLOCK:m=y) += ice_cgu_ops.o ice_cgu_util.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o +ice-y += kcompat.o # Use kcompat pldmfw.c if kernel does not provide CONFIG_PLDMFW ifndef CONFIG_PLDMFW ice-y += kcompat_pldmfw.o diff --git a/drivers/thirdparty/ice/ice.h b/drivers/thirdparty/ice/ice.h index 204dc3ddfcc0..56d34dc9b548 100644 --- a/drivers/thirdparty/ice/ice.h +++ b/drivers/thirdparty/ice/ice.h @@ -17,9 +17,9 @@ #include #include #include -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS #include -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #include #include #include @@ -30,6 +30,10 @@ #include #include #include +#ifdef HAVE_INCLUDE_BITFIELD +#include +#endif /* HAVE_INCLUDE_BITFIELD */ +#include #include #include #include @@ -38,6 +42,10 @@ #include #include #include +#include +#ifdef HAVE_LINKMODE +#include +#endif /* HAVE_LINKMODE */ #ifdef HAVE_XDP_SUPPORT #include #include @@ -49,14 +57,12 @@ #if IS_ENABLED(CONFIG_NET_DEVLINK) #include #endif /* CONFIG_NET_DEVLINK */ -#if IS_ENABLED(CONFIG_DCB) -#include -#endif /* CONFIG_DCB */ #ifdef HAVE_CONFIG_DIMLIB 
#include #else #include "kcompat_dim.h" #endif +#include "ice_ddp.h" #include "ice_devids.h" #include "ice_type.h" #include "ice_txrx.h" @@ -65,17 +71,20 @@ #include "ice_common.h" #include "ice_flow.h" #include "ice_sched.h" -#include +#ifdef USE_INTEL_AUX_BUS +#include "linux/auxiliary_bus.h" +#else +#include +#endif /* USE_INTEL_AUX_BUS */ #include #include "ice_idc_int.h" #include "virtchnl.h" -#include "ice_virtchnl_pf.h" #include "ice_sriov.h" +#include "ice_vf_mbx.h" #include "ice_ptp.h" -#include "ice_cgu.h" -#include "ice_cgu_ops.h" -#include "ice_cgu_util.h" #include "ice_fdir.h" +#include "ice_vdcm.h" +#include "ice_siov.h" #ifdef HAVE_AF_XDP_ZC_SUPPORT #include "ice_xsk.h" #endif /* HAVE_AF_XDP_ZC_SUPPORT */ @@ -97,6 +106,9 @@ #include #endif #endif /* HAVE_GENEVE_RX_OFFLOAD || HAVE_GENEVE_TYPE */ +#ifdef HAVE_GTP_SUPPORT +#include +#endif /* HAVE_GTP_SUPPORT */ #ifdef HAVE_UDP_ENC_RX_OFFLOAD #include #endif @@ -113,10 +125,12 @@ #include "ice_repr.h" #include "ice_eswitch.h" #include "ice_vsi_vlan_ops.h" +#include "ice_gnss.h" extern const char ice_drv_ver[]; -#define ICE_BAR0 0 -#define ICE_BAR3 3 +#define ICE_BAR0 0 +#define ICE_BAR_RDMA_DOORBELL_OFFSET 0x7f0000 +#define ICE_BAR3 3 #ifdef CONFIG_DEBUG_FS #define ICE_MAX_CSR_SPACE (8 * 1024 * 1024 - 64 * 1024) #endif /* CONFIG_DEBUG_FS */ @@ -124,7 +138,11 @@ extern const char ice_drv_ver[]; #define ICE_MIN_NUM_DESC 64 #define ICE_MAX_NUM_DESC 8160 #define ICE_DFLT_MIN_RX_DESC 512 +#ifdef CONFIG_ICE_USE_SKB +#define ICE_DFLT_NUM_RX_DESC 512 +#else #define ICE_DFLT_NUM_RX_DESC 2048 +#endif /* CONFIG_ICE_USE_SKB */ #define ICE_DFLT_NUM_TX_DESC 256 #define ICE_DFLT_TXQ_VMDQ_VSI 1 @@ -133,9 +151,10 @@ extern const char ice_drv_ver[]; #define ICE_MAX_NUM_VMDQ_VSI 16 #define ICE_MAX_TXQ_VMDQ_VSI 4 #define ICE_MAX_RXQ_VMDQ_VSI 4 -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS #define ICE_MAX_MACVLANS 64 #endif +#define ICE_MAX_SCALABLE 100 #define ICE_DFLT_TRAFFIC_CLASS BIT(0) #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) #define ICE_AQ_LEN 192 @@ -192,8 +211,9 @@ extern const char ice_drv_ver[]; #define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i])) #define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i])) -#define ICE_ACL_ENTIRE_SLICE 1 -#define ICE_ACL_HALF_SLICE 2 +#define ICE_ACL_ENTIRE_SLICE 1 +#define ICE_ACL_HALF_SLICE 2 +#define ICE_TCAM_DIVIDER_THRESHOLD 6 /* Minimum BW limit is 500 Kbps for any scheduler node */ #define ICE_MIN_BW_LIMIT 500 @@ -202,9 +222,29 @@ extern const char ice_drv_ver[]; */ #define ICE_BW_KBPS_DIVISOR 125 -#if defined(HAVE_TC_FLOWER_ENC) && defined(HAVE_TC_INDIR_BLOCK) -#define ICE_GTP_TNL_WELLKNOWN_PORT 2152 -#endif /* HAVE_TC_FLOWER_ENC && HAVE_TC_INDIR_BLOCK */ +#if defined(HAVE_TCF_MIRRED_DEV) || defined(HAVE_TC_FLOW_RULE_INFRASTRUCTURE) +#define ICE_GTPU_PORT 2152 +#endif /* HAVE_TCF_MIRRED_DEC || HAVE_TC_FLOW_RULE_INFRASTRUCTURE */ +#ifdef HAVE_GTP_SUPPORT +#define ICE_GTPC_PORT 2123 +#endif /* HAVE_GTP_SUPPORT */ + +#ifdef HAVE_TC_SETUP_CLSFLOWER +/* prio 5..7 can be used as advanced switch filter priority. Default recipes + * have prio 4 and below, hence prio value between 5..7 can be used as filter + * prio for advanced switch filter (advanced switch filter means it needs + * new recipe to be created to represent specified extraction sequence because + * default recipe extraction sequence does not represent custom extraction) + */ +#define ICE_SWITCH_FLTR_PRIO_QUEUE 7 +/* prio 6 is reserved for future use (e.g. 
switch filter with L3 fields + + * (Optional: IP TOS/TTL) + L4 fields + (optionally: TCP fields such as + * SYN/FIN/RST)) + */ +#define ICE_SWITCH_FLTR_PRIO_RSVD 6 +#define ICE_SWITCH_FLTR_PRIO_VSI 5 +#define ICE_SWITCH_FLTR_PRIO_QGRP ICE_SWITCH_FLTR_PRIO_VSI +#endif /* ifdef HAVE_TC_SETUP_CLSFLOWER*/ /* Macro for each VSI in a PF */ #define ice_for_each_vsi(pf, i) \ @@ -254,25 +294,15 @@ struct ice_fwlog_user_input { enum ice_feature { ICE_F_DSCP, ICE_F_PTP_EXTTS, + ICE_F_CGU, + ICE_F_PHY_RCLK, + ICE_F_SMA_CTRL, + ICE_F_GNSS, + ICE_F_FIXED_TIMING_PINS, + ICE_F_LAG, ICE_F_MAX }; - -enum ice_channel_fltr_type { - ICE_CHNL_FLTR_TYPE_INVALID, - ICE_CHNL_FLTR_TYPE_SRC_PORT, - ICE_CHNL_FLTR_TYPE_DEST_PORT, - ICE_CHNL_FLTR_TYPE_SRC_DEST_PORT, /* for future use cases */ - ICE_CHNL_FLTR_TYPE_TENANT_ID, - ICE_CHNL_FLTR_TYPE_SRC_IPV4, - ICE_CHNL_FLTR_TYPE_DEST_IPV4, - ICE_CHNL_FLTR_TYPE_SRC_DEST_IPV4, - ICE_CHNL_FLTR_TYPE_SRC_IPV6, - ICE_CHNL_FLTR_TYPE_DEST_IPV6, - ICE_CHNL_FLTR_TYPE_SRC_DEST_IPV6, - ICE_CHNL_FLTR_TYPE_LAST /* must be last */ -}; - struct ice_channel { struct list_head list; u8 type; @@ -293,7 +323,9 @@ struct ice_channel { atomic_t fd_queue; /* packets services thru' inline-FD filter */ u64 fd_pkt_cnt; - enum ice_channel_fltr_type fltr_type; + u8 inline_fd:1; + u8 qps_per_poller; + u32 poller_timeout; struct ice_vsi *ch_vsi; }; @@ -302,6 +334,8 @@ struct ice_channel { #define ICE_BW_MBIT_PS_DIVISOR 125000 /* rate / (1000000 / 8) Mbps */ #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#define ICE_ADQ_MAX_QPS 256 + struct ice_txq_meta { u32 q_teid; /* Tx-scheduler element identifier */ u16 q_id; /* Entry in VSI's txq_map bitmap */ @@ -344,8 +378,6 @@ struct ice_sw { struct ice_pf *pf; u16 sw_id; /* switch ID for this switch */ u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */ - struct ice_vsi *dflt_vsi; /* default VSI for this switch */ - u8 dflt_vsi_ena:1; /* true if above dflt_vsi is enabled */ }; enum ice_pf_state { @@ -354,9 +386,9 @@ enum ice_pf_state { ICE_NEEDS_RESTART, ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ - ICE_PFR_REQ, /* set by driver and peers */ - ICE_CORER_REQ, /* set by driver and peers */ - ICE_GLOBR_REQ, /* set by driver and peers */ + ICE_PFR_REQ, /* set by driver and aux drivers */ + ICE_CORER_REQ, /* set by driver and aux drivers */ + ICE_GLOBR_REQ, /* set by driver and aux drivers */ ICE_CORER_RECV, /* set by OICR handler */ ICE_GLOBR_RECV, /* set by OICR handler */ ICE_EMPR_RECV, /* set by OICR handler */ @@ -382,8 +414,6 @@ enum ice_pf_state { ICE_VF_DIS, ICE_CFG_BUSY, ICE_SERVICE_SCHED, - ICE_PTP_TX_TS_READY, - ICE_PTP_EXT_TS_READY, ICE_SERVICE_DIS, ICE_FD_FLUSH_REQ, ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ @@ -393,6 +423,7 @@ enum ice_pf_state { ICE_LINK_DEFAULT_OVERRIDE_PENDING, ICE_PHY_INIT_COMPLETE, ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */ + ICE_AUX_ERR_PENDING, ICE_STATE_NBITS /* must be last */ }; @@ -403,14 +434,12 @@ enum ice_vsi_state { ICE_VSI_NETDEV_REGISTERED, ICE_VSI_UMAC_FLTR_CHANGED, ICE_VSI_MMAC_FLTR_CHANGED, - ICE_VSI_VLAN_FLTR_CHANGED, ICE_VSI_PROMISC_CHANGED, ICE_VSI_STATE_NBITS /* must be last */ }; enum ice_chnl_feature { ICE_CHNL_FEATURE_FD_ENA, /* for side-band flow-director */ - ICE_CHNL_FEATURE_INLINE_FD_ENA, /* for inline flow-director */ /* using the SO_MARK socket option will trigger skb->mark to be set. 
* Driver should act on skb->mark of not (to align flow to HW queue * binding) is additionally controlled via ethtool private flag and @@ -448,10 +477,10 @@ struct ice_vsi { struct ice_port_info *port_info; /* back pointer to port_info */ struct ice_ring **rx_rings; /* Rx ring array */ struct ice_ring **tx_rings; /* Tx ring array */ -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS /* Initial VSI tx_rings array when L2 offload is off */ struct ice_ring **base_tx_rings; -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ struct ice_q_vector **q_vectors; /* q_vector array */ irqreturn_t (*irq_handler)(int irq, void *data); @@ -472,7 +501,7 @@ struct ice_vsi { u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ - s16 vf_id; /* VF ID for SR-IOV VSIs */ + struct ice_vf *vf; /* VF associated with this VSI */ u16 ethtype; /* Ethernet protocol for pause frame */ u16 num_gfltr; @@ -516,7 +545,6 @@ struct ice_vsi { struct ice_vsi_vlan_ops outer_vlan_ops; u16 num_vlan; - /* queue information */ u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ @@ -539,6 +567,7 @@ struct ice_vsi { u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ #endif /* HAVE_XDP_SUPPORT */ #ifdef HAVE_AF_XDP_ZC_SUPPORT + unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ #ifndef HAVE_AF_XDP_NETDEV_UMEM struct xdp_umem **xsk_umems; u16 num_xsk_umems_used; @@ -549,10 +578,6 @@ struct ice_vsi { #ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO struct tc_mqprio_qopt_offload mqprio_qopt;/* queue parameters */ #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ - DECLARE_BITMAP(ptp_tx_idx, INDEX_PER_QUAD); - struct sk_buff *ptp_tx_skb[INDEX_PER_QUAD]; - u32 tx_hwtstamp_skipped; - u8 ptp_tx:1; /* Channel Specific Fields */ struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC]; @@ -599,7 +624,7 @@ struct ice_vsi { u16 old_ena_tc; struct ice_channel *ch; - struct net_device **target_netdevs; + u8 num_tc_devlink_params; /* setup back reference, to which aggregator node this VSI * corresponds to @@ -756,24 +781,30 @@ struct ice_q_vector { #ifdef ADQ_PERF_COUNTERS struct ice_q_vector_ch_stats ch_stats; #endif /* ADQ_PERF_COUNTERS */ + u64 last_wd_jiffy; } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { ICE_FLAG_FLTR_SYNC, ICE_FLAG_VMDQ_ENA, -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS ICE_FLAG_MACVLAN_ENA, -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ ICE_FLAG_IWARP_ENA, ICE_FLAG_RSS_ENA, ICE_FLAG_SRIOV_ENA, ICE_FLAG_SRIOV_CAPABLE, + ICE_FLAG_SIOV_ENA, + ICE_FLAG_SIOV_CAPABLE, ICE_FLAG_DCB_CAPABLE, ICE_FLAG_DCB_ENA, ICE_FLAG_FD_ENA, - ICE_FLAG_PTP_ENA, /* NVM PTP support */ + ICE_FLAG_PTP_SUPPORTED, /* NVM PTP support */ ICE_FLAG_PTP, /* PTP successfully initialized */ - ICE_FLAG_PEER_ENA, + ICE_FLAG_AUX_ENA, + ICE_FLAG_PLUG_AUX_DEV, + ICE_FLAG_UNPLUG_AUX_DEV, + ICE_FLAG_MTU_CHANGED, ICE_FLAG_ADV_FEATURES, #ifdef NETIF_F_HW_TC ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */ @@ -787,23 +818,28 @@ enum ice_pf_flags { ICE_FLAG_BASE_R_FEC, #endif /* !ETHTOOL_GFECPARAM */ ICE_FLAG_FW_LLDP_AGENT, - ICE_FLAG_CHNL_INLINE_FD_ENA, ICE_FLAG_CHNL_INLINE_FD_MARK_ENA, ICE_FLAG_CHNL_PKT_INSPECT_OPT_ENA, ICE_FLAG_CHNL_PKT_CLEAN_BP_STOP_ENA, ICE_FLAG_CHNL_PKT_CLEAN_BP_STOP_CFG, ICE_FLAG_MOD_POWER_UNSUPPORTED, + ICE_FLAG_PHY_FW_LOAD_FAILED, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ ICE_FLAG_LEGACY_RX, ICE_FLAG_VF_TRUE_PROMISC_ENA, ICE_FLAG_MDD_AUTO_RESET_VF, - 
ICE_FLAG_VF_VLAN_PRUNE_DIS, + ICE_FLAG_VF_VLAN_PRUNING, ICE_FLAG_LINK_LENIENT_MODE_ENA, ICE_FLAG_ESWITCH_CAPABLE, + ICE_FLAG_DPLL_FAST_LOCK, + ICE_FLAG_DPLL_MONITOR, + ICE_FLAG_EXTTS_FILTER, + ICE_FLAG_GNSS, /* GNSS successfully initialized */ + ICE_FLAG_ALLOW_FEC_DIS_AUTO, ICE_PF_FLAGS_NBITS /* must be last */ }; -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS struct ice_macvlan { struct list_head list; int id; @@ -812,7 +848,7 @@ struct ice_macvlan { struct ice_vsi *vsi; u8 mac[ETH_ALEN]; }; -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ struct ice_switchdev_info { struct ice_vsi *control_vsi; @@ -845,27 +881,45 @@ struct ice_agg_node { u8 valid; }; -enum ice_flash_update_preservation { - /* Preserve all settings and fields */ - ICE_FLASH_UPDATE_PRESERVE_ALL = 0, - /* Preserve limited fields, such as VPD, PCI serial ID, MACs, etc */ - ICE_FLASH_UPDATE_PRESERVE_LIMITED, - /* Return all fields to factory settings */ - ICE_FLASH_UPDATE_PRESERVE_FACTORY_SETTINGS, - /* Do not perform any preservation */ - ICE_FLASH_UPDATE_PRESERVE_NONE, +#ifdef HAVE_DEVLINK_HEALTH +enum ice_mdd_src { + ICE_MDD_SRC_NONE = 0, + ICE_MDD_SRC_TX_PQM, + ICE_MDD_SRC_TX_TCLAN, + ICE_MDD_SRC_TX_TDPU, + ICE_MDD_SRC_RX }; +struct ice_mdd_event { + struct list_head list; + enum ice_mdd_src src; + u8 pf_num; + u16 vf_num; + u8 event; + u16 queue; +}; + +struct ice_mdd_reporter { + struct devlink_health_reporter *reporter; + u16 count; + struct list_head event_list; +}; +#endif /* HAVE_DEVLINK_HEALTH */ + struct ice_pf { struct pci_dev *pdev; #if IS_ENABLED(CONFIG_NET_DEVLINK) #ifdef HAVE_DEVLINK_REGIONS struct devlink_region *nvm_region; + struct devlink_region *sram_region; struct devlink_region *devcaps_region; #endif /* HAVE_DEVLINK_REGIONS */ /* devlink port data */ struct devlink_port devlink_port; +#ifdef HAVE_DEVLINK_HEALTH + struct ice_mdd_reporter mdd_reporter; +#endif /* HAVE_DEVLINK_HEALTH */ #endif /* CONFIG_NET_DEVLINK */ /* OS reserved IRQ details */ @@ -885,15 +939,7 @@ struct ice_pf { #ifdef CONFIG_DEBUG_FS struct dentry *ice_debugfs_pf; #endif /* CONFIG_DEBUG_FS */ - /* Virtchnl/SR-IOV config info */ - struct ice_vf *vf; - u16 num_alloc_vfs; /* actual number of VFs allocated */ - u16 num_vfs_supported; /* num VFs supported for this PF */ - u16 num_qps_per_vf; - u16 num_msix_per_vf; - /* used to ratelimit the MDD event logging */ - unsigned long last_printed_mdd_jiffies; - DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT); + struct ice_vfs vfs; DECLARE_BITMAP(features, ICE_F_MAX); DECLARE_BITMAP(state, ICE_STATE_NBITS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); @@ -906,28 +952,33 @@ struct ice_pf { struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */ struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ struct mutex tc_mutex; /* lock to protect TC changes */ + struct mutex adev_mutex; /* lock to protect aux device access */ + struct mutex lag_mutex; /* lock protects the lag struct */ u32 msg_enable; struct ice_ptp ptp; - struct ice_cgu_info cgu_info; + struct tty_driver *ice_gnss_tty_driver; + struct tty_port *gnss_tty_port; + struct gnss_serial *gnss_serial; u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */ u16 rdma_base_vector; - struct ice_peer_obj *rdma_peer; -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS /* MACVLAN specific variables */ DECLARE_BITMAP(avail_macvlan, ICE_MAX_MACVLANS); struct list_head macvlan_list; u16 num_macvlan; u16 max_num_macvlan; -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ /* spinlock to protect the 
AdminQ wait list */ spinlock_t aq_wait_lock; struct hlist_head aq_wait_list; wait_queue_head_t aq_wait_queue; + bool fw_emp_reset_disabled; wait_queue_head_t reset_wait_queue; u32 hw_csum_rx_error; + u32 oicr_err_reg; u16 oicr_idx; /* Other interrupt cause MSIX vector index */ u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ u16 max_pf_txqs; /* Total Tx queues PF wide */ @@ -975,8 +1026,8 @@ struct ice_pf { unsigned long tx_timeout_last_recovery; u32 tx_timeout_recovery_level; char int_name[ICE_INT_NAME_STR_LEN]; - struct ice_peer_obj_int **peers; - int peer_idx; + struct iidc_core_dev_info **cdev_infos; + int aux_idx; u32 sw_int_count; #ifdef HAVE_TC_SETUP_CLSFLOWER /* count of tc_flower filters specific to channel (aka where filter @@ -986,6 +1037,9 @@ struct ice_pf { struct hlist_head tc_flower_fltr_list; #endif /* HAVE_TC_SETUP_CLSFLOWER */ + u16 max_qps; + u16 max_adq_qps; + struct ice_dcf dcf; __le64 nvm_phy_type_lo; /* NVM PHY type low */ __le64 nvm_phy_type_hi; /* NVM PHY type high */ @@ -1000,13 +1054,19 @@ struct ice_pf { */ spinlock_t tnl_lock; struct list_head tnl_list; +#ifdef HAVE_UDP_TUNNEL_NIC_INFO +#ifdef HAVE_UDP_TUNNEL_NIC_SHARED + struct udp_tunnel_nic_shared udp_tunnel_shared; +#endif /* HAVE_UDP_TUNNEL_NIC_SHARED */ + struct udp_tunnel_nic_info udp_tunnel_nic; +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ struct ice_switchdev_info switchdev; #define ICE_INVALID_AGG_NODE_ID 0 #define ICE_PF_AGG_NODE_ID_START 1 #define ICE_MAX_PF_AGG_NODES 32 struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES]; -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS #define ICE_MACVLAN_AGG_NODE_ID_START (ICE_PF_AGG_NODE_ID_START + \ ICE_MAX_PF_AGG_NODES) #define ICE_MAX_MACVLAN_AGG_NODES 32 @@ -1015,8 +1075,20 @@ struct ice_pf { #define ICE_VF_AGG_NODE_ID_START 65 #define ICE_MAX_VF_AGG_NODES 32 struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES]; + enum ice_cgu_state synce_dpll_state; + u8 synce_ref_pin; + enum ice_cgu_state ptp_dpll_state; + u8 ptp_ref_pin; + s64 ptp_dpll_phase_offset; + + u32 phc_recalc; + + u8 n_quanta_prof_used; }; +extern struct workqueue_struct *ice_wq; +extern struct workqueue_struct *ice_lag_wq; + struct ice_netdev_priv { struct ice_vsi *vsi; #ifdef HAVE_TC_INDIR_BLOCK @@ -1038,9 +1110,6 @@ struct ice_netdev_priv { struct ice_repr *repr; }; -extern struct ida ice_peer_index_ida; - - /** * ice_vector_ch_enabled * @qv: pointer to q_vector, can be NULL @@ -1052,6 +1121,19 @@ static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv) return !!qv->ch; /* Enable it to run with TC */ } +/** + * ice_vector_ind_poller + * @qv: pointer to q_vector + * + * This function returns true if vector is channel enabled and + * independent pollers are enabled on the associated channel. 
+ */ +static inline bool ice_vector_ind_poller(struct ice_q_vector *qv) +{ + return (ice_vector_ch_enabled(qv) && qv->ch->qps_per_poller && + qv->ch->poller_timeout); +} + /** * ice_vector_busypoll_intr * @qv: pointer to q_vector @@ -1065,45 +1147,6 @@ static inline bool ice_vector_busypoll_intr(struct ice_q_vector *qv) !(qv->state_flags & ICE_CHNL_IN_BP); } -/** - * ice_vector_ever_in_busypoll - * @qv: pointer to q_vector - * - * This function returns true if vectors current OR previous state - * is BUSY_POLL - */ -static inline bool ice_vector_ever_in_busypoll(struct ice_q_vector *qv) -{ - return (qv->state_flags & ICE_CHNL_PREV_IN_BP) || - (qv->state_flags & ICE_CHNL_IN_BP); -} - -/** - * ice_vector_state_curr_prev_intr - * @qv: pointer to q_vector - * - * This function returns true if vectors current AND previous state - * is INTERRUPT - */ -static inline bool ice_vector_state_curr_prev_intr(struct ice_q_vector *qv) -{ - return !(qv->state_flags & ICE_CHNL_PREV_IN_BP) && - !(qv->state_flags & ICE_CHNL_IN_BP); -} - -/** - * ice_vector_intr_busypoll - * @qv: pointer to q_vector - * - * This function returns true if vector is transitioning from INTERRUPT - * to BUSY_POLL based on current and previous state of vector - */ -static inline bool ice_vector_intr_busypoll(struct ice_q_vector *qv) -{ - return !(qv->state_flags & ICE_CHNL_PREV_IN_BP) && - (qv->state_flags & ICE_CHNL_IN_BP); -} - /** * ice_adq_trigger_sw_intr * @hw: ptr to HW @@ -1151,33 +1194,6 @@ ice_sw_intr_cntr(struct ice_q_vector *q_vector, bool napi_codepath) } #endif /* ADQ_PERF_COUNTERS */ -/** - * ice_force_wb - trigger force write-back by setting WB_ON_ITR bit - * @hw: ptr to HW - * @q_vector: pointer to q_vector - * - * This function is used to force write-backs by setting WB_ON_ITR bit - * in DYN_CTLN register. WB_ON_ITR and INTENA are mutually exclusive bits. - * Setting WB_ON_ITR bits means Tx and Rx descriptors are written back based - * on ITR expiration irrespective of INTENA setting - */ -static inline void -ice_force_wb(struct ice_hw *hw, struct ice_q_vector *q_vector) -{ - if (q_vector->num_ring_rx || q_vector->num_ring_tx) { -#ifdef ADQ_PERF_COUNTERS - q_vector->ch_stats.num_wb_on_itr_set++; -#endif /* ADQ_PERF_COUNTERS */ - wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), - ICE_GLINT_DYN_CTL_WB_ON_ITR(0, ICE_RX_ITR)); - } - - /* needed to avoid triggering WB_ON_ITR again which typically - * happens from ice_set_wb_on_itr function - */ - q_vector->wb_on_itr = true; -} - /** * ice_irq_dynamic_ena - Enable default interrupt generation settings * @hw: pointer to HW struct @@ -1215,12 +1231,30 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) return np->vsi->back; } -#ifdef HAVE_XDP_SUPPORT -static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi) +/** + * ice_kobj_to_pf - Retrieve the PF struct associated with a kobj + * @kobj: pointer to the kobject + * + * Returns a pointer to PF or NULL if there is no association. 
+ */ +static inline struct ice_pf *ice_kobj_to_pf(struct kobject *kobj) { - return !!vsi->xdp_prog; + if (!kobj || !kobj->parent) + return NULL; + + return pci_get_drvdata(to_pci_dev(kobj_to_dev(kobj->parent))); } +static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi) +{ +#ifdef HAVE_XDP_SUPPORT + return !!vsi->xdp_prog; +#else + return false; +#endif +} + +#ifdef HAVE_XDP_SUPPORT static inline void ice_set_ring_xdp(struct ice_ring *ring) { ring->flags |= ICE_TX_FLAGS_RING_XDP; @@ -1229,37 +1263,38 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring) #endif /* HAVE_XDP_SUPPORT */ #ifdef HAVE_AF_XDP_ZC_SUPPORT /** - * ice_xsk_umem - get XDP UMEM bound to a ring - * @ring: ring to use + * ice_xsk_pool - get XSK buffer pool bound to a ring + * @ring: Rx ring to use * - * Returns a pointer to xdp_umem structure if there is an UMEM present, + * Returns a pointer to xdp_umem structure if there is a buffer pool present, * NULL otherwise. */ #ifdef HAVE_NETDEV_BPF_XSK_POOL -static inline struct xsk_buff_pool *ice_xsk_umem(struct ice_ring *ring) +static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring) #else -static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring) +static inline struct xdp_umem *ice_xsk_pool(struct ice_ring *ring) #endif { + struct ice_vsi *vsi = ring->vsi; #ifndef HAVE_AF_XDP_NETDEV_UMEM - struct xdp_umem **umems = ring->vsi->xsk_umems; + struct xdp_umem **umems = vsi->xsk_umems; #endif /* !HAVE_AF_XDP_NETDEV_UMEM */ u16 qid = ring->q_index; if (ice_ring_is_xdp(ring)) - qid -= ring->vsi->num_xdp_txq; + qid -= vsi->num_xdp_txq; #ifndef HAVE_AF_XDP_NETDEV_UMEM - if (qid >= ring->vsi->num_xsk_umems || !umems || !umems[qid] || - !ice_is_xdp_ena_vsi(ring->vsi)) + if (qid >= vsi->num_xsk_umems || !umems || !umems[qid] || + !ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) return NULL; return umems[qid]; #else - if (!ice_is_xdp_ena_vsi(ring->vsi)) + if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) return NULL; - return xsk_get_pool_from_qid(ring->vsi->netdev, qid); + return xsk_get_pool_from_qid(vsi->netdev, qid); #endif /* !HAVE_AF_XDP_NETDEV_UMEM */ } #endif /* HAVE_AF_XDP_ZC_SUPPORT */ @@ -1272,10 +1307,7 @@ static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring) */ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf) { - if (pf->vsi) - return pf->vsi[0]; - - return NULL; + return pf->vsi[0]; } /** @@ -1325,6 +1357,21 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf) return pf->vsi[pf->ctrl_vsi_idx]; } +/** + * ice_find_vsi - Find the VSI from VSI ID + * @pf: The PF pointer to search in + * @vsi_num: The VSI ID to search for + */ +static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num) +{ + int i; + + ice_for_each_vsi(pf, i) + if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num) + return pf->vsi[i]; + return NULL; +} + /** * ice_find_first_vsi_by_type - Find and return first VSI of a given type * @pf: PF to search for VSI @@ -1387,19 +1434,6 @@ static inline bool ice_vsi_fd_ena(struct ice_vsi *vsi) return !!test_bit(ICE_CHNL_FEATURE_FD_ENA, vsi->features); } -/** - * ice_vsi_inline_fd_ena - * @vsi: pointer to VSI - * - * This function returns true if VSI is enabled for usage of flow-director - * otherwise returns false. 
This is controlled thru' ethtool priv-flag - * 'channel-inline-flow-director' - */ -static inline bool ice_vsi_inline_fd_ena(struct ice_vsi *vsi) -{ - return !!test_bit(ICE_CHNL_FEATURE_INLINE_FD_ENA, vsi->features); -} - static inline bool ice_vsi_inline_fd_mark_ena(struct ice_vsi *vsi) { return !!test_bit(ICE_CHNL_FEATURE_INLINE_FD_MARK_ENA, vsi->features); @@ -1529,12 +1563,12 @@ static inline bool ice_active_vmdqs(struct ice_pf *pf) return !!ice_find_first_vsi_by_type(pf, ICE_VSI_VMDQ2); } -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS static inline bool ice_is_offloaded_macvlan_ena(struct ice_pf *pf) { return test_bit(ICE_FLAG_MACVLAN_ENA, pf->flags); } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #ifdef CONFIG_DEBUG_FS void ice_debugfs_pf_init(struct ice_pf *pf); @@ -1563,12 +1597,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx); void ice_update_pf_stats(struct ice_pf *pf); void ice_update_vsi_stats(struct ice_vsi *vsi); int ice_up(struct ice_vsi *vsi); +void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes); int ice_down(struct ice_vsi *vsi); +int ice_down_up(struct ice_vsi *vsi); int ice_vsi_cfg(struct ice_vsi *vsi); struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi); -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS int ice_vsi_cfg_netdev_tc0(struct ice_vsi *vsi); -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #ifdef HAVE_XDP_SUPPORT int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog); int ice_destroy_xdp_rings(struct ice_vsi *vsi); @@ -1590,33 +1626,71 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed); void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); -#if IS_ENABLED(CONFIG_MFD_CORE) -int ice_init_peer_devices(struct ice_pf *pf); +int ice_plug_aux_dev(struct iidc_core_dev_info *cdev_info, const char *name); +void ice_unplug_aux_dev(struct iidc_core_dev_info *cdev_info); +int ice_plug_aux_devs(struct ice_pf *pf); +void ice_unplug_aux_devs(struct ice_pf *pf); +int ice_init_aux_devices(struct ice_pf *pf); int -ice_for_each_peer(struct ice_pf *pf, void *data, - int (*fn)(struct ice_peer_obj_int *, void *)); +ice_for_each_aux(struct ice_pf *pf, void *data, + int (*fn)(struct iidc_core_dev_info *, void *)); #ifdef CONFIG_PM -void ice_peer_refresh_msix(struct ice_pf *pf); +void ice_cdev_info_refresh_msix(struct ice_pf *pf); #endif /* CONFIG_PM */ -#else /* !CONFIG_MFD_CORE */ -static inline int ice_init_peer_devices(struct ice_pf *pf) { return 0; } - -static inline int -ice_for_each_peer(struct ice_pf *pf, void *data, - int (*fn)(struct ice_peer_obj_int *, void *)) +#ifdef HAVE_NETDEV_UPPER_INFO +/** + * ice_set_sriov_cap - enable SRIOV in PF flags + * @pf: PF struct + */ +static inline void ice_set_sriov_cap(struct ice_pf *pf) { - return 0; + if (pf->hw.func_caps.common_cap.sr_iov_1_1) + set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); +} + +/** + * ice_clear_sriov_cap - disable SRIOV in PF flags + * @pf: PF struct + */ +static inline void ice_clear_sriov_cap(struct ice_pf *pf) +{ + clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); +} + +/** + * ice_set_rdma_cap - enable RDMA in PF flags + * @pf: PF struct + */ +static inline void ice_set_rdma_cap(struct ice_pf *pf) +{ + if (pf->hw.func_caps.common_cap.iwarp && pf->num_rdma_msix) { + set_bit(ICE_FLAG_IWARP_ENA, pf->flags); + } +} + +/** + * ice_clear_rdma_cap - 
disable RDMA in PF flags + * @pf: PF struct + */ +static inline void ice_clear_rdma_cap(struct ice_pf *pf) +{ + clear_bit(ICE_FLAG_IWARP_ENA, pf->flags); +} + +#endif /* HAVE_NETDEV_UPPER_INFO */ +/** ice_chk_rdma_cap - check the status of RDMA if PF flags + * @pf: PF struct + */ +static inline bool ice_chk_rdma_cap(struct ice_pf *pf) +{ + return test_bit(ICE_FLAG_IWARP_ENA, pf->flags); } -#ifdef CONFIG_PM -static inline void ice_peer_refresh_msix(struct ice_pf *pf) { } -#endif /* CONFIG_PM */ -#endif /* !CONFIG_MFD_CORE */ -const char *ice_stat_str(enum ice_status stat_err); const char *ice_aq_str(enum ice_aq_err aq_err); bool ice_is_wol_supported(struct ice_hw *hw); int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, struct ice_rq_event_info *event); +void ice_fdir_del_all_fltrs(struct ice_vsi *vsi); int ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, bool is_tun); @@ -1648,6 +1722,7 @@ int ice_ntuple_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input, int fltr_idx); void ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring); +void ice_ch_vsi_update_ring_vecs(struct ice_vsi *vsi); int ice_open(struct net_device *netdev); int ice_open_internal(struct net_device *netdev); int ice_stop(struct net_device *netdev); diff --git a/drivers/thirdparty/ice/ice_acl.c b/drivers/thirdparty/ice/ice_acl.c index fcf0fea30e17..4a5cc38d77de 100644 --- a/drivers/thirdparty/ice/ice_acl.c +++ b/drivers/thirdparty/ice/ice_acl.c @@ -12,7 +12,7 @@ * * Allocate ACL table (indirect 0x0C10) */ -enum ice_status +int ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl, struct ice_sq_cd *cd) { @@ -20,10 +20,10 @@ ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl, struct ice_aq_desc desc; if (!tbl->act_pairs_per_entry) - return ICE_ERR_PARAM; + return -EINVAL; if (tbl->act_pairs_per_entry > ICE_AQC_MAX_ACTION_MEMORIES) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* If this is concurrent table, then buffer shall be valid and * contain DependentAllocIDs, 'num_dependent_alloc_ids' should be valid @@ -31,10 +31,10 @@ ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl, */ if (tbl->concurr) { if (!tbl->num_dependent_alloc_ids) - return ICE_ERR_PARAM; + return -EINVAL; if (tbl->num_dependent_alloc_ids > ICE_AQC_MAX_CONCURRENT_ACL_TBL) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_alloc_acl_tbl); @@ -63,7 +63,7 @@ ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl, * format is 'struct ice_aqc_acl_generic', pass ptr to that struct * as 'buf' and its size as 'buf_size' */ -enum ice_status +int ice_aq_dealloc_acl_tbl(struct ice_hw *hw, u16 alloc_id, struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd) { @@ -77,7 +77,7 @@ ice_aq_dealloc_acl_tbl(struct ice_hw *hw, u16 alloc_id, return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd); } -static enum ice_status +static int ice_aq_acl_entry(struct ice_hw *hw, u16 opcode, u8 tcam_idx, u16 entry_idx, struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd) { @@ -106,7 +106,7 @@ ice_aq_acl_entry(struct ice_hw *hw, u16 opcode, u8 tcam_idx, u16 entry_idx, * * Program ACL entry (direct 0x0C20) */ -enum ice_status +int ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx, struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd) { @@ -127,7 +127,7 @@ ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx, * NOTE: Caller of this API to parse 'buf' 
appropriately since it contains * response (key and key invert) */ -enum ice_status +int ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx, struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd) { @@ -136,7 +136,7 @@ ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx, } /* Helper function to alloc/dealloc ACL action pair */ -static enum ice_status +static int ice_aq_actpair_a_d(struct ice_hw *hw, u16 opcode, u16 alloc_id, struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd) { @@ -162,7 +162,7 @@ ice_aq_actpair_a_d(struct ice_hw *hw, u16 opcode, u16 alloc_id, * This command doesn't need and doesn't have its own command buffer * but for response format is as specified in 'struct ice_aqc_acl_generic' */ -enum ice_status +int ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id, struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd) { @@ -179,7 +179,7 @@ ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id, * * Deallocate ACL actionpair (direct 0x0C13) */ -enum ice_status +int ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id, struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd) { @@ -188,7 +188,7 @@ ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id, } /* Helper function to program/query ACL action pair */ -static enum ice_status +static int ice_aq_actpair_p_q(struct ice_hw *hw, u16 opcode, u8 act_mem_idx, u16 act_entry_idx, struct ice_aqc_actpair *buf, struct ice_sq_cd *cd) @@ -218,7 +218,7 @@ ice_aq_actpair_p_q(struct ice_hw *hw, u16 opcode, u8 act_mem_idx, * * Program action entries (indirect 0x0C1C) */ -enum ice_status +int ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx, struct ice_aqc_actpair *buf, struct ice_sq_cd *cd) { @@ -236,7 +236,7 @@ ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx, * * Query ACL actionpair (indirect 0x0C25) */ -enum ice_status +int ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx, struct ice_aqc_actpair *buf, struct ice_sq_cd *cd) { @@ -252,7 +252,7 @@ ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx, * De-allocate ACL resources (direct 0x0C1A). Used by SW to release all the * resources allocated for it using a single command */ -enum ice_status ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd) +int ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd) { struct ice_aq_desc desc; @@ -271,7 +271,7 @@ enum ice_status ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd) * * This function sends ACL profile commands */ -static enum ice_status +static int ice_acl_prof_aq_send(struct ice_hw *hw, u16 opc, u8 prof_id, struct ice_aqc_acl_prof_generic_frmt *buf, struct ice_sq_cd *cd) @@ -295,7 +295,7 @@ ice_acl_prof_aq_send(struct ice_hw *hw, u16 opc, u8 prof_id, * * Program ACL profile extraction (indirect 0x0C1D) */ -enum ice_status +int ice_prgm_acl_prof_xtrct(struct ice_hw *hw, u8 prof_id, struct ice_aqc_acl_prof_generic_frmt *buf, struct ice_sq_cd *cd) @@ -313,7 +313,7 @@ ice_prgm_acl_prof_xtrct(struct ice_hw *hw, u8 prof_id, * * Query ACL profile (indirect 0x0C21) */ -enum ice_status +int ice_query_acl_prof(struct ice_hw *hw, u8 prof_id, struct ice_aqc_acl_prof_generic_frmt *buf, struct ice_sq_cd *cd) @@ -329,12 +329,12 @@ ice_query_acl_prof(struct ice_hw *hw, u8 prof_id, * This function checks the counter bank range for counter type and returns * success or failure. 
*/ -static enum ice_status ice_aq_acl_cntrs_chk_params(struct ice_acl_cntrs *cntrs) +static int ice_aq_acl_cntrs_chk_params(struct ice_acl_cntrs *cntrs) { - enum ice_status status = 0; + int status = 0; if (!cntrs || !cntrs->amount) - return ICE_ERR_PARAM; + return -EINVAL; switch (cntrs->type) { case ICE_AQC_ACL_CNT_TYPE_SINGLE: @@ -343,18 +343,18 @@ static enum ice_status ice_aq_acl_cntrs_chk_params(struct ice_acl_cntrs *cntrs) * shall be 0-3. */ if (cntrs->bank > ICE_AQC_ACL_MAX_CNT_SINGLE) - status = ICE_ERR_OUT_OF_RANGE; + status = -EIO; break; case ICE_AQC_ACL_CNT_TYPE_DUAL: /* Pair counter type - counts number of bytes and packets * The valid values for byte/packet counter duals shall be 0-1 */ if (cntrs->bank > ICE_AQC_ACL_MAX_CNT_DUAL) - status = ICE_ERR_OUT_OF_RANGE; + status = -EIO; break; default: /* Unspecified counter type - Invalid or error */ - status = ICE_ERR_PARAM; + status = -EINVAL; } return status; @@ -372,14 +372,14 @@ static enum ice_status ice_aq_acl_cntrs_chk_params(struct ice_acl_cntrs *cntrs) * unsuccessful if returned counter value is invalid. In this case it returns * an error otherwise success. */ -enum ice_status +int ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, struct ice_sq_cd *cd) { struct ice_aqc_acl_alloc_counters *cmd; u16 first_cntr, last_cntr; struct ice_aq_desc desc; - enum ice_status status; + int status; /* check for invalid params */ status = ice_aq_acl_cntrs_chk_params(cntrs); @@ -396,7 +396,7 @@ ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, last_cntr = le16_to_cpu(cmd->ops.resp.last_counter); if (first_cntr == ICE_AQC_ACL_ALLOC_CNT_INVAL || last_cntr == ICE_AQC_ACL_ALLOC_CNT_INVAL) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; cntrs->first_cntr = first_cntr; cntrs->last_cntr = last_cntr; } @@ -411,13 +411,13 @@ ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, * * De-allocate ACL counters (direct 0x0C17) */ -enum ice_status +int ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, struct ice_sq_cd *cd) { struct ice_aqc_acl_dealloc_counters *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; /* check for invalid params */ status = ice_aq_acl_cntrs_chk_params(cntrs); @@ -433,7 +433,6 @@ ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } - /** * ice_prog_acl_prof_ranges - program ACL profile ranges * @hw: pointer to the HW struct @@ -443,7 +442,7 @@ ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, * * Program ACL profile ranges (indirect 0x0C1E) */ -enum ice_status +int ice_prog_acl_prof_ranges(struct ice_hw *hw, u8 prof_id, struct ice_aqc_acl_profile_ranges *buf, struct ice_sq_cd *cd) @@ -466,7 +465,7 @@ ice_prog_acl_prof_ranges(struct ice_hw *hw, u8 prof_id, * * Query ACL profile ranges (indirect 0x0C22) */ -enum ice_status +int ice_query_acl_prof_ranges(struct ice_hw *hw, u8 prof_id, struct ice_aqc_acl_profile_ranges *buf, struct ice_sq_cd *cd) @@ -488,16 +487,16 @@ ice_query_acl_prof_ranges(struct ice_hw *hw, u8 prof_id, * * Allocate ACL scenario (indirect 0x0C14) */ -enum ice_status +int ice_aq_alloc_acl_scen(struct ice_hw *hw, u16 *scen_id, struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd) { struct ice_aqc_acl_alloc_scen *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!scen_id) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_alloc_acl_scen); desc.flags |= 
cpu_to_le16(ICE_AQ_FLAG_RD); @@ -518,7 +517,7 @@ ice_aq_alloc_acl_scen(struct ice_hw *hw, u16 *scen_id, * * Deallocate ACL scenario (direct 0x0C15) */ -enum ice_status +int ice_aq_dealloc_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_sq_cd *cd) { struct ice_aqc_acl_dealloc_scen *cmd; @@ -541,7 +540,7 @@ ice_aq_dealloc_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_sq_cd *cd) * * Calls update or query ACL scenario */ -static enum ice_status +static int ice_aq_update_query_scen(struct ice_hw *hw, u16 opcode, u16 scen_id, struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd) { @@ -566,7 +565,7 @@ ice_aq_update_query_scen(struct ice_hw *hw, u16 opcode, u16 scen_id, * * Update ACL scenario (indirect 0x0C1B) */ -enum ice_status +int ice_aq_update_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd) { @@ -583,7 +582,7 @@ ice_aq_update_acl_scen(struct ice_hw *hw, u16 scen_id, * * Query ACL scenario (indirect 0x0C23) */ -enum ice_status +int ice_aq_query_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd) { diff --git a/drivers/thirdparty/ice/ice_acl.h b/drivers/thirdparty/ice/ice_acl.h index bdfe2681935c..ff0f5d0a0393 100644 --- a/drivers/thirdparty/ice/ice_acl.h +++ b/drivers/thirdparty/ice/ice_acl.h @@ -125,78 +125,78 @@ struct ice_acl_cntrs { u16 last_cntr; }; -enum ice_status +int ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params); -enum ice_status ice_acl_destroy_tbl(struct ice_hw *hw); -enum ice_status +int ice_acl_destroy_tbl(struct ice_hw *hw); +int ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries, u16 *scen_id); -enum ice_status +int ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_dealloc_acl_tbl(struct ice_hw *hw, u16 alloc_id, struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx, struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx, struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id, struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id, struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx, struct ice_aqc_actpair *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx, struct ice_aqc_actpair *buf, struct ice_sq_cd *cd); -enum ice_status ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd); +int ice_prgm_acl_prof_xtrct(struct ice_hw *hw, u8 prof_id, struct ice_aqc_acl_prof_generic_frmt *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_query_acl_prof(struct ice_hw *hw, u8 prof_id, struct ice_aqc_acl_prof_generic_frmt *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, struct ice_sq_cd *cd); -enum ice_status +int ice_prog_acl_prof_ranges(struct ice_hw *hw, u8 prof_id, struct 
ice_aqc_acl_profile_ranges *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_query_acl_prof_ranges(struct ice_hw *hw, u8 prof_id, struct ice_aqc_acl_profile_ranges *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_alloc_acl_scen(struct ice_hw *hw, u16 *scen_id, struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_dealloc_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_update_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_query_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd); -enum ice_status +int ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen, enum ice_acl_entry_prio prio, u8 *keys, u8 *inverts, struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx); -enum ice_status +int ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen, struct ice_acl_act_entry *acts, u8 acts_cnt, u16 entry_idx); -enum ice_status +int ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx); bool ice_is_acl_empty(struct ice_hw *hw); #endif /* _ICE_ACL_H_ */ diff --git a/drivers/thirdparty/ice/ice_acl_ctrl.c b/drivers/thirdparty/ice/ice_acl_ctrl.c index a777d215f1b9..8fce1736d553 100644 --- a/drivers/thirdparty/ice/ice_acl_ctrl.c +++ b/drivers/thirdparty/ice/ice_acl_ctrl.c @@ -4,12 +4,11 @@ #include "ice_acl.h" #include "ice_flow.h" - /* Determine the TCAM index of entry 'e' within the ACL table */ -#define ICE_ACL_TBL_TCAM_IDX(e) ((e) / ICE_AQC_ACL_TCAM_DEPTH) +#define ICE_ACL_TBL_TCAM_IDX(e) ((u8)((e) / ICE_AQC_ACL_TCAM_DEPTH)) /* Determine the entry index within the TCAM */ -#define ICE_ACL_TBL_TCAM_ENTRY_IDX(e) ((e) % ICE_AQC_ACL_TCAM_DEPTH) +#define ICE_ACL_TBL_TCAM_ENTRY_IDX(e) ((u16)((e) % ICE_AQC_ACL_TCAM_DEPTH)) #define ICE_ACL_SCEN_ENTRY_INVAL 0xFFFF @@ -74,14 +73,14 @@ ice_acl_scen_assign_entry_idx(struct ice_acl_scen *scen, * * To mark an entry available in scenario */ -static enum ice_status +static int ice_acl_scen_free_entry_idx(struct ice_acl_scen *scen, u16 idx) { if (idx >= scen->num_entry) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; if (!test_and_clear_bit(idx, scen->entry_bitmap)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; return 0; } @@ -141,18 +140,18 @@ static u16 ice_acl_tbl_calc_end_idx(u16 start, u16 num_entries, u16 width) * * Initialize the ACL table by invalidating TCAM entries and action pairs. 
*/ -static enum ice_status ice_acl_init_tbl(struct ice_hw *hw) +static int ice_acl_init_tbl(struct ice_hw *hw) { struct ice_aqc_actpair act_buf; struct ice_aqc_acl_data buf; - enum ice_status status = 0; struct ice_acl_tbl *tbl; + int status = 0; u8 tcam_idx, i; u16 idx; tbl = hw->acl_tbl; if (!tbl) - return ICE_ERR_CFG; + return -EIO; memset(&buf, 0, sizeof(buf)); memset(&act_buf, 0, sizeof(act_buf)); @@ -251,10 +250,8 @@ ice_acl_assign_act_mems_to_tcam(struct ice_acl_tbl *tbl, u8 cur_tcam, */ static void ice_acl_divide_act_mems_to_tcams(struct ice_acl_tbl *tbl) { - u16 num_cscd, stack_level, stack_idx, min_act_mem; - u8 tcam_idx = tbl->first_tcam; - u16 max_idx_to_get_extra; - u8 mem_idx = 0; + u16 num_cscd, stack_level, stack_idx, max_idx_to_get_extra; + u8 min_act_mem, tcam_idx = tbl->first_tcam, mem_idx = 0; /* Determine number of stacked TCAMs */ stack_level = DIV_ROUND_UP(tbl->info.depth, ICE_AQC_ACL_TCAM_DEPTH); @@ -303,20 +300,20 @@ static void ice_acl_divide_act_mems_to_tcams(struct ice_acl_tbl *tbl) * values for the size of the table, but this will need to grow as more flow * entries are added by the user level. */ -enum ice_status +int ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params) { u16 width, depth, first_e, last_e, i; struct ice_aqc_acl_generic *resp_buf; struct ice_acl_alloc_tbl tbl_alloc; struct ice_acl_tbl *tbl; - enum ice_status status; + int status; if (hw->acl_tbl) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; if (!params) - return ICE_ERR_PARAM; + return -EINVAL; /* round up the width to the next TCAM width boundary. */ width = roundup(params->width, (u16)ICE_AQC_ACL_KEY_WIDTH_BYTES); @@ -324,7 +321,8 @@ ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params) depth = ALIGN(params->depth, ICE_ACL_ENTRY_ALLOC_UNIT); if (params->entry_act_pairs < width / ICE_AQC_ACL_KEY_WIDTH_BYTES) { - params->entry_act_pairs = width / ICE_AQC_ACL_KEY_WIDTH_BYTES; + params->entry_act_pairs = + (u8)(width / ICE_AQC_ACL_KEY_WIDTH_BYTES); if (params->entry_act_pairs > ICE_AQC_TBL_MAX_ACTION_PAIRS) params->entry_act_pairs = ICE_AQC_TBL_MAX_ACTION_PAIRS; @@ -333,7 +331,7 @@ ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params) /* Validate that width*depth will not exceed the TCAM limit */ if ((DIV_ROUND_UP(depth, ICE_AQC_ACL_TCAM_DEPTH) * (width / ICE_AQC_ACL_KEY_WIDTH_BYTES)) > ICE_AQC_ACL_SLICES) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; memset(&tbl_alloc, 0, sizeof(tbl_alloc)); tbl_alloc.width = width; @@ -364,7 +362,7 @@ ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params) tbl = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tbl), GFP_KERNEL); if (!tbl) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto out; } @@ -422,7 +420,7 @@ out: * @hw: pointer to the hardware structure * @req: info of partition being allocated */ -static enum ice_status +static int ice_acl_alloc_partition(struct ice_hw *hw, struct ice_acl_scen *req) { u16 start = 0, cnt = 0, off = 0; @@ -435,7 +433,7 @@ ice_acl_alloc_partition(struct ice_hw *hw, struct ice_acl_scen *req) /* Check if we have enough TCAMs to accommodate the width */ if (width > hw->acl_tbl->last_tcam - hw->acl_tbl->first_tcam + 1) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* Number of entries must be multiple of ICE_ACL_ENTRY_ALLOC_UNIT's */ r_entries = ALIGN(req->num_entry, ICE_ACL_ENTRY_ALLOC_UNIT); @@ -546,7 +544,7 @@ ice_acl_alloc_partition(struct ice_hw *hw, struct ice_acl_scen *req) } } while (!done); - return cnt >= r_entries ? 
ICE_SUCCESS : ICE_ERR_MAX_LIMIT; + return cnt >= r_entries ? 0 : -ENOSPC; } /** @@ -584,7 +582,7 @@ ice_acl_fill_tcam_select(struct ice_aqc_acl_scen *scen_buf, */ for (j = 0; j < ICE_AQC_ACL_KEY_WIDTH_BYTES; j++) { /* PKT DIR uses the 1st location of Byte Selection Base: + 1 */ - u8 val = ICE_AQC_ACL_BYTE_SEL_BASE + 1 + idx; + u8 val = (u8)(ICE_AQC_ACL_BYTE_SEL_BASE + 1 + idx); if (tcam_idx_in_cascade == cascade_cnt - 1) { if (j == ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM) @@ -735,21 +733,21 @@ ice_acl_commit_partition(struct ice_hw *hw, struct ice_acl_scen *scen, * @num_entries: number of entries to be allocated for the scenario * @scen_id: holds returned scenario ID if successful */ -enum ice_status +int ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries, u16 *scen_id) { u8 cascade_cnt, first_tcam, last_tcam, i, k; struct ice_aqc_acl_scen scen_buf; struct ice_acl_scen *scen; - enum ice_status status; + int status; if (!hw->acl_tbl) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; scen = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*scen), GFP_KERNEL); if (!scen) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; scen->start = hw->acl_tbl->first_entry; scen->width = ICE_AQC_ACL_KEY_WIDTH_BYTES * @@ -789,7 +787,7 @@ ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries, /* set the START_SET bit at the beginning of the stack */ scen_buf.tcam_cfg[k].start_cmp_set |= ICE_AQC_ACL_ALLOC_SCE_START_SET; while (k <= last_tcam) { - u8 last_tcam_idx_cascade = cascade_cnt + k - 1; + u16 last_tcam_idx_cascade = cascade_cnt + k - 1; /* set start_cmp for the first cascaded TCAM */ scen_buf.tcam_cfg[k].start_cmp_set |= @@ -842,14 +840,14 @@ out: * @hw: pointer to the HW struct * @scen_id: ID of the remove scenario */ -static enum ice_status ice_acl_destroy_scen(struct ice_hw *hw, u16 scen_id) +static int ice_acl_destroy_scen(struct ice_hw *hw, u16 scen_id) { struct ice_acl_scen *scen, *tmp_scen; struct ice_flow_prof *p, *tmp; - enum ice_status status; + int status; if (!hw->acl_tbl) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Remove profiles that use "scen_id" scenario */ list_for_each_entry_safe(p, tmp, &hw->fl_profs[ICE_BLK_ACL], l_entry) @@ -885,16 +883,16 @@ static enum ice_status ice_acl_destroy_scen(struct ice_hw *hw, u16 scen_id) * ice_acl_destroy_tbl - Destroy a previously created LEM table for ACL * @hw: pointer to the HW struct */ -enum ice_status ice_acl_destroy_tbl(struct ice_hw *hw) +int ice_acl_destroy_tbl(struct ice_hw *hw) { struct ice_acl_scen *pos_scen, *tmp_scen; struct ice_aqc_acl_generic resp_buf; struct ice_aqc_acl_scen buf; - enum ice_status status; + int status; u8 i; if (!hw->acl_tbl) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Mark all the created scenario's TCAM to stop the packet lookup and * delete them afterward @@ -962,23 +960,23 @@ enum ice_status ice_acl_destroy_tbl(struct ice_hw *hw) * The "keys" and "inverts" buffers must be of the size which is the same as * the scenario's width */ -enum ice_status +int ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen, enum ice_acl_entry_prio prio, u8 *keys, u8 *inverts, struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx) { - u8 i, entry_tcam, num_cscd, offset; struct ice_aqc_acl_data buf; - enum ice_status status = 0; - u16 idx; + u8 entry_tcam, offset; + u16 i, num_cscd, idx; + int status = 0; if (!scen) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; *entry_idx = ice_acl_scen_assign_entry_idx(scen, prio); if (*entry_idx >= scen->num_entry) { *entry_idx = 0; - return 
ICE_ERR_MAX_LIMIT; + return -ENOSPC; } /* Determine number of cascaded TCAMs */ @@ -999,7 +997,7 @@ ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen, * be programmed first; the TCAM entry of the leftmost TCAM * should be programmed last. */ - offset = num_cscd - i - 1; + offset = (u8)(num_cscd - i - 1); memcpy(&buf.entry_key.val, &keys[offset * sizeof(buf.entry_key.val)], sizeof(buf.entry_key.val)); @@ -1037,18 +1035,17 @@ out: * * Program a scenario's action memory */ -enum ice_status +int ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen, struct ice_acl_act_entry *acts, u8 acts_cnt, u16 entry_idx) { - u8 entry_tcam, num_cscd, i, actx_idx = 0; + u16 idx, entry_tcam, num_cscd, i, actx_idx = 0; struct ice_aqc_actpair act_buf; - enum ice_status status = 0; - u16 idx; + int status = 0; if (entry_idx >= scen->num_entry) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; memset(&act_buf, 0, sizeof(act_buf)); @@ -1085,7 +1082,7 @@ ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen, } if (!status && actx_idx < acts_cnt) - status = ICE_ERR_MAX_LIMIT; + status = -ENOSPC; return status; } @@ -1096,23 +1093,23 @@ ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen, * @scen: scenario to remove the entry from * @entry_idx: the scenario-relative index of the flow entry being removed */ -enum ice_status +int ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx) { struct ice_aqc_actpair act_buf; struct ice_aqc_acl_data buf; - u8 entry_tcam, num_cscd, i; - enum ice_status status = 0; - u16 idx; + u16 num_cscd, idx, i; + int status = 0; + u8 entry_tcam; if (!scen) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; if (entry_idx >= scen->num_entry) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; if (!test_bit(entry_idx, scen->entry_bitmap)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Determine number of cascaded TCAMs */ num_cscd = DIV_ROUND_UP(scen->width, ICE_AQC_ACL_KEY_WIDTH_BYTES); @@ -1123,8 +1120,8 @@ ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx) /* invalidate the flow entry */ memset(&buf, 0, sizeof(buf)); for (i = 0; i < num_cscd; i++) { - status = ice_aq_program_acl_entry(hw, entry_tcam + i, idx, &buf, - NULL); + status = ice_aq_program_acl_entry(hw, (u8)(entry_tcam + i), + idx, &buf, NULL); if (status) ice_debug(hw, ICE_DBG_ACL, "AQ program ACL entry failed status: %d\n", status); diff --git a/drivers/thirdparty/ice/ice_acl_main.c b/drivers/thirdparty/ice/ice_acl_main.c index 81eb056f1b32..2abc164d67d7 100644 --- a/drivers/thirdparty/ice/ice_acl_main.c +++ b/drivers/thirdparty/ice/ice_acl_main.c @@ -3,7 +3,6 @@ /* ACL support for ice */ - #include "ice.h" #include "ice_lib.h" #include "ice_flow.h" @@ -127,7 +126,6 @@ ice_acl_set_ip4_usr_seg(struct ice_flow_seg_info *seg, return 0; } - /** * ice_acl_check_input_set - Checks that a given ACL input set is valid * @pf: ice PF structure @@ -144,7 +142,6 @@ ice_acl_check_input_set(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp) struct ice_flow_seg_info *seg; enum ice_fltr_ptype fltr_type; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct device *dev; int err; @@ -215,12 +212,10 @@ ice_acl_check_input_set(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp) /* Adding a profile for the given flow specification with no * actions (NULL) and zero actions 0. 
*/ - status = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX, fltr_type, - seg, 1, NULL, 0, &prof); - if (status) { - err = ice_status_to_errno(status); + err = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX, fltr_type, seg, 1, + NULL, 0, &prof); + if (err) goto err_exit; - } hw_prof->fdir_seg[0] = seg; return 0; @@ -249,7 +244,6 @@ int ice_acl_add_rule_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) struct ice_fd_hw_prof *hw_prof; struct ice_fdir_fltr *input; enum ice_fltr_ptype flow; - enum ice_status status; struct device *dev; struct ice_pf *pf; struct ice_hw *hw; @@ -279,6 +273,13 @@ int ice_acl_add_rule_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) if (ret) goto free_input; + mutex_lock(&hw->fdir_fltr_lock); + if (ice_fdir_is_dup_fltr(hw, input)) { + ret = -EINVAL; + goto release_lock; + } + mutex_unlock(&hw->fdir_fltr_lock); + memset(&acts, 0, sizeof(acts)); act_cnt = 1; if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { @@ -296,12 +297,11 @@ int ice_acl_add_rule_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) flow = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT); hw_prof = hw->acl_prof[flow]; - status = ice_flow_add_entry(hw, ICE_BLK_ACL, flow, fsp->location, - vsi->idx, ICE_FLOW_PRIO_NORMAL, input, acts, - act_cnt, &entry_h); - if (status) { + ret = ice_flow_add_entry(hw, ICE_BLK_ACL, flow, fsp->location, + vsi->idx, ICE_FLOW_PRIO_NORMAL, input, acts, + act_cnt, &entry_h); + if (ret) { dev_err(dev, "Could not add flow entry %d\n", flow); - ret = ice_status_to_errno(status); goto free_input; } @@ -312,12 +312,14 @@ int ice_acl_add_rule_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) input->acl_fltr = true; /* input struct is added to the HW filter list */ + mutex_lock(&hw->fdir_fltr_lock); ice_ntuple_update_list_entry(pf, input, fsp->location); - return 0; - +release_lock: + mutex_unlock(&hw->fdir_fltr_lock); free_input: - devm_kfree(dev, input); + if (ret) + devm_kfree(dev, input); return ret; } diff --git a/drivers/thirdparty/ice/ice_adminq_cmd.h b/drivers/thirdparty/ice/ice_adminq_cmd.h index c4fc3ee344d5..8ababd5d75e9 100644 --- a/drivers/thirdparty/ice/ice_adminq_cmd.h +++ b/drivers/thirdparty/ice/ice_adminq_cmd.h @@ -8,11 +8,17 @@ * descriptor format. It is shared between Firmware and Software. 
*/ +#include "ice_osdep.h" +#include "ice_defs.h" #define ICE_MAX_VSI 768 #define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9 #define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728 +enum ice_aq_res_access_type { + ICE_RES_READ = 1, + ICE_RES_WRITE +}; struct ice_aqc_generic { __le32 param0; @@ -21,7 +27,6 @@ struct ice_aqc_generic { __le32 addr_low; }; - /* Get version (direct 0x0001) */ struct ice_aqc_get_ver { __le32 rom_ver; @@ -36,7 +41,6 @@ struct ice_aqc_get_ver { u8 api_patch; }; - /* Send driver version (indirect 0x0002) */ struct ice_aqc_driver_ver { u8 major_ver; @@ -48,7 +52,6 @@ struct ice_aqc_driver_ver { __le32 addr_low; }; - /* Queue Shutdown (direct 0x0003) */ struct ice_aqc_q_shutdown { u8 driver_unloading; @@ -56,8 +59,6 @@ struct ice_aqc_q_shutdown { u8 reserved[15]; }; - - /* Get Expanded Error Code (0x0005, direct) */ struct ice_aqc_get_exp_err { __le32 reason; @@ -66,7 +67,6 @@ struct ice_aqc_get_exp_err { u8 rsvd[8]; }; - /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ @@ -99,7 +99,6 @@ struct ice_aqc_req_res { u8 reserved[2]; }; - /* Get function capabilities (indirect 0x000A) * Get device capabilities (indirect 0x000B) */ @@ -112,7 +111,6 @@ struct ice_aqc_list_caps { __le32 addr_low; }; - /* Device/Function buffer entry, repeated per reported capability */ struct ice_aqc_list_caps_elem { __le16 cap; @@ -160,6 +158,10 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1 0x0082 #define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2 0x0083 #define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3 0x0084 +#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085 +#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087 +#define ICE_AQC_CAPS_DYN_FLATTENING 0x0090 +#define ICE_AQC_CAPS_ROCEV2_LAG 0x0092 u8 major_ver; u8 minor_ver; @@ -173,7 +175,6 @@ struct ice_aqc_list_caps_elem { __le64 rsvd2; }; - /* Manage MAC address, read command - indirect (0x0107) * This struct is also used for the response */ @@ -194,7 +195,6 @@ struct ice_aqc_manage_mac_read { __le32 addr_low; }; - /* Response buffer format for manage MAC read command */ struct ice_aqc_manage_mac_read_resp { u8 lport_num; @@ -204,7 +204,6 @@ struct ice_aqc_manage_mac_read_resp { u8 mac_addr[ETH_ALEN]; }; - /* Manage MAC address, write command - direct (0x0108) */ struct ice_aqc_manage_mac_write { u8 rsvd; @@ -221,7 +220,6 @@ struct ice_aqc_manage_mac_write { __le32 addr_low; }; - /* Clear PXE Command and response (direct 0x0110) */ struct ice_aqc_clear_pxe { u8 rx_cnt; @@ -229,7 +227,6 @@ struct ice_aqc_clear_pxe { u8 reserved[15]; }; - /* Configure No-Drop Policy Command (direct 0x0112) */ struct ice_aqc_config_no_drop_policy { u8 opts; @@ -237,7 +234,6 @@ struct ice_aqc_config_no_drop_policy { u8 rsvd[15]; }; - /* Get switch configuration (0x0200) */ struct ice_aqc_get_sw_cfg { /* Reserved for command and copy of request flags for response */ @@ -255,7 +251,6 @@ struct ice_aqc_get_sw_cfg { __le32 addr_low; }; - /* Each entry in the response buffer is of the following type: */ struct ice_aqc_get_sw_cfg_resp_elem { /* VSI/Port Number */ @@ -282,8 +277,6 @@ struct ice_aqc_get_sw_cfg_resp_elem { #define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15) }; - - /* Set Port parameters, (direct, 0x0203) */ struct ice_aqc_set_port_params { __le16 cmd_flags; @@ -305,7 +298,6 @@ struct ice_aqc_set_port_params { u8 reserved[10]; }; - /* These resource type defines are used for all switch resource * commands where a resource type is required, such as: * Get Resource Allocation command (indirect 0x0204) @@ -360,7 +352,6 @@ struct 
ice_aqc_get_res_alloc { __le32 addr_low; }; - /* Get Resource Allocation Response Buffer per response */ struct ice_aqc_get_res_resp_elem { __le16 res_type; /* Types defined above cmd 0x0204 */ @@ -370,8 +361,6 @@ struct ice_aqc_get_res_resp_elem { __le16 total_free; /* Resources un-allocated/not reserved by any PF */ }; - - /* Allocate Resources command (indirect 0x0208) * Free Resources command (indirect 0x0209) */ @@ -382,7 +371,6 @@ struct ice_aqc_alloc_free_res_cmd { __le32 addr_low; }; - /* Resource descriptor */ struct ice_aqc_res_elem { union { @@ -391,7 +379,6 @@ struct ice_aqc_res_elem { } e; }; - /* Buffer for Allocate/Free Resources commands */ struct ice_aqc_alloc_free_res_elem { __le16 res_type; /* Types defined above cmd 0x0204 */ @@ -402,7 +389,6 @@ struct ice_aqc_alloc_free_res_elem { struct ice_aqc_res_elem elem[]; }; - /* Get Allocated Resource Descriptors Command (indirect 0x020A) */ struct ice_aqc_get_allocd_res_desc { union { @@ -422,8 +408,6 @@ struct ice_aqc_get_allocd_res_desc { __le32 addr_low; }; - - /* Request buffer for Set VLAN Mode AQ command (indirect 0x020C) */ struct ice_aqc_set_vlan_mode { u8 reserved; @@ -450,7 +434,6 @@ struct ice_aqc_set_vlan_mode { u8 prot_id_reserved[30]; }; - /* Response buffer for Get VLAN Mode AQ command (indirect 0x020D) */ struct ice_aqc_get_vlan_mode { u8 vlan_mode; @@ -459,7 +442,6 @@ struct ice_aqc_get_vlan_mode { u8 reserved[98]; }; - /* Add VSI (indirect 0x0210) * Update VSI (indirect 0x0211) * Get VSI (indirect 0x0212) @@ -485,7 +467,6 @@ struct ice_aqc_add_get_update_free_vsi { __le32 addr_low; }; - /* Response descriptor for: * Add VSI (indirect 0x0210) * Update VSI (indirect 0x0211) @@ -500,7 +481,6 @@ struct ice_aqc_add_update_free_vsi_resp { __le32 addr_low; }; - struct ice_aqc_get_vsi_resp { __le16 vsi_num; u8 vf_id; @@ -514,7 +494,6 @@ struct ice_aqc_get_vsi_resp { __le32 addr_low; }; - struct ice_aqc_vsi_props { __le16 valid_sections; #define ICE_AQ_VSI_PROP_SW_VALID BIT(0) @@ -539,6 +518,7 @@ struct ice_aqc_vsi_props { #define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0 #define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S) #define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0) +#define ICE_AQ_VSI_SW_FLAG_RX_PASS_PRUNE_ENA BIT(3) #define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4) u8 veb_stat_id; #define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0 @@ -684,7 +664,6 @@ struct ice_aqc_vsi_props { u8 reserved[24]; }; - /* Add/update mirror rule - direct (0x0260) */ #define ICE_AQC_RULE_ID_VALID_S 7 #define ICE_AQC_RULE_ID_VALID_M (0x1 << ICE_AQC_RULE_ID_VALID_S) @@ -739,7 +718,6 @@ struct ice_aqc_add_update_mir_rule { __le32 addr_low; }; - /* Delete mirror rule - direct(0x0261) */ struct ice_aqc_delete_mir_rule { __le16 rule_id; @@ -756,7 +734,6 @@ struct ice_aqc_delete_mir_rule { u8 reserved[10]; }; - /* Set/Get storm config - (direct 0x0280, 0x0281) */ /* This structure holds get storm configuration response and same structure * is used to perform set_storm_cfg @@ -791,7 +768,6 @@ struct ice_aqc_storm_cfg { __le32 reserved; }; - #define ICE_MAX_NUM_RECIPES 64 /* Add/Get Recipe (indirect 0x0290/0x0292) */ @@ -803,7 +779,6 @@ struct ice_aqc_add_get_recipe { __le32 addr_low; }; - struct ice_aqc_recipe_content { u8 rid; #define ICE_AQ_RECIPE_ID_S 0 @@ -838,7 +813,6 @@ struct ice_aqc_recipe_content { #define ICE_AQ_RECIPE_DFLT_ACT_VALID BIT(31) }; - struct ice_aqc_recipe_data_elem { u8 recipe_indx; u8 resp_bits; @@ -850,7 +824,6 @@ struct ice_aqc_recipe_data_elem { u8 rsvd2[20]; }; - /* Set/Get Recipes to Profile Association 
(direct 0x0291/0x0293) */ struct ice_aqc_recipe_to_profile { __le16 profile_id; @@ -858,7 +831,6 @@ struct ice_aqc_recipe_to_profile { DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES); }; - /* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3) */ struct ice_aqc_sw_rules { @@ -873,7 +845,6 @@ struct ice_aqc_sw_rules { __le32 addr_low; }; - /* Add/Update/Get/Remove lookup Rx/Tx command/response entry * This structures describes the lookup rules and associated actions. "index" * is returned as part of a response to a successful Add command, and can be @@ -927,6 +898,8 @@ struct ice_sw_rule_lkup_rx_tx { #define ICE_SINGLE_ACT_PTR 0x2 #define ICE_SINGLE_ACT_PTR_VAL_S 4 #define ICE_SINGLE_ACT_PTR_VAL_M (0x1FFF << ICE_SINGLE_ACT_PTR_VAL_S) + /* Bit 17 should be set if pointed action includes a FWD cmd */ +#define ICE_SINGLE_ACT_PTR_HAS_FWD BIT(17) /* Bit 18 should be set to 1 */ #define ICE_SINGLE_ACT_PTR_BIT BIT(18) @@ -959,7 +932,6 @@ struct ice_sw_rule_lkup_rx_tx { u8 hdr[]; }; - /* Add/Update/Remove large action command/response entry * "index" is returned as part of a response to a successful Add command, and * can be used to identify the action for Update/Get/Remove commands. @@ -1020,7 +992,6 @@ struct ice_sw_rule_lg_act { __le32 act[]; /* array of size for actions */ }; - /* Add/Update/Remove VSI list command/response entry * "index" is returned as part of a response to a successful Add command, and * can be used to identify the VSI list for Update/Get/Remove commands. @@ -1031,14 +1002,12 @@ struct ice_sw_rule_vsi_list { __le16 vsi[]; /* Array of number_vsi VSI numbers */ }; - /* Query VSI list command/response entry */ struct ice_sw_rule_vsi_list_query { __le16 index; DECLARE_BITMAP(vsi_list, ICE_MAX_VSI); } __packed; - /* Add switch rule response: * Content of return buffer is same as the input buffer. 
The status field and * LUT index are updated as part of the response @@ -1061,8 +1030,6 @@ struct ice_aqc_sw_rules_elem { } __packed pdata; }; - - /* PFC Ignore (direct 0x0301) * The command and response use the same descriptor structure */ @@ -1074,7 +1041,6 @@ struct ice_aqc_pfc_ignore { u8 reserved[14]; }; - /* Set PFC Mode (direct 0x0303) * Query PFC Mode (direct 0x0302) */ @@ -1089,7 +1055,6 @@ struct ice_aqc_set_query_pfc_mode { u8 rsvd[15]; }; - /* Set DCB Parameters (direct 0x0306) */ struct ice_aqc_set_dcb_params { u8 cmd_flags; /* unused in response */ @@ -1101,7 +1066,6 @@ struct ice_aqc_set_dcb_params { u8 rsvd[14]; }; - /* Get Default Topology (indirect 0x0400) */ struct ice_aqc_get_topo { u8 port_num; @@ -1112,6 +1076,23 @@ struct ice_aqc_get_topo { __le32 addr_low; }; +/* Get/Set Tx Topology (indirect 0x0418/0x0417) */ +struct ice_aqc_get_set_tx_topo { + u8 set_flags; +#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0) +#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1) +#define ICE_AQC_TX_TOPO_FLAGS_SET_PSM BIT(2) +#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4) +#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5) + u8 get_flags; +#define ICE_AQC_TX_TOPO_GET_NO_UPDATE 0 +#define ICE_AQC_TX_TOPO_GET_PSM 1 +#define ICE_AQC_TX_TOPO_GET_RAM 2 + __le16 reserved1; + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; /* Update TSE (indirect 0x0403) * Get TSE (indirect 0x0404) @@ -1129,8 +1110,6 @@ struct ice_aqc_sched_elem_cmd { __le32 addr_low; }; - - struct ice_aqc_txsched_move_grp_info_hdr { __le32 src_parent_teid; __le32 dest_parent_teid; @@ -1139,19 +1118,16 @@ struct ice_aqc_txsched_move_grp_info_hdr { u8 reserved; }; - struct ice_aqc_move_elem { struct ice_aqc_txsched_move_grp_info_hdr hdr; __le32 teid[]; }; - struct ice_aqc_elem_info_bw { __le16 bw_profile_idx; __le16 bw_alloc; }; - struct ice_aqc_txsched_elem { u8 elem_type; /* Special field, reserved for some aq calls */ #define ICE_AQC_ELEM_TYPE_UNDEFINED 0x0 @@ -1183,41 +1159,34 @@ struct ice_aqc_txsched_elem { __le16 reserved2; }; - struct ice_aqc_txsched_elem_data { __le32 parent_teid; __le32 node_teid; struct ice_aqc_txsched_elem data; }; - struct ice_aqc_txsched_topo_grp_info_hdr { __le32 parent_teid; __le16 num_elems; __le16 reserved2; }; - struct ice_aqc_add_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; struct ice_aqc_txsched_elem_data generic[]; }; - - struct ice_aqc_get_topo_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; struct ice_aqc_txsched_elem_data generic[ICE_AQC_TOPO_MAX_LEVEL_NUM]; }; - struct ice_aqc_delete_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; __le32 teid[]; }; - /* Query Port ETS (indirect 0x040E) * * This indirect command is used to query port TC node configuration. 
@@ -1229,7 +1198,6 @@ struct ice_aqc_query_port_ets { __le32 addr_low; }; - struct ice_aqc_port_ets_elem { u8 tc_valid_bits; u8 reserved[3]; @@ -1245,7 +1213,6 @@ struct ice_aqc_port_ets_elem { __le32 tc_node_teid[8]; /* Used for response, reserved in command */ }; - /* Rate limiting profile for * Add RL profile (indirect 0x0410) * Query RL profile (indirect 0x0411) @@ -1261,7 +1228,6 @@ struct ice_aqc_rl_profile { __le32 addr_low; }; - struct ice_aqc_rl_profile_elem { u8 level; u8 flags; @@ -1281,7 +1247,21 @@ struct ice_aqc_rl_profile_elem { __le16 rl_encode; }; +/* Config Node Attributes (indirect 0x0419) + * Query Node Attributes (indirect 0x041A) + */ +struct ice_aqc_node_attr { + __le16 num_entries; /* Number of attributes structures in the buffer */ + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; +struct ice_aqc_node_attr_elem { + __le32 node_teid; + __le16 max_children; + __le16 children_level; +}; /* Configure L2 Node CGD (indirect 0x0414) * This indirect command allows configuring a congestion domain for given L2 @@ -1294,14 +1274,12 @@ struct ice_aqc_cfg_l2_node_cgd { __le32 addr_low; }; - struct ice_aqc_cfg_l2_node_cgd_elem { __le32 node_teid; u8 cgd; u8 reserved[3]; }; - /* Query Scheduler Resource Allocation (indirect 0x0412) * This indirect command retrieves the scheduler resources allocated by * EMP Firmware to the given PF. @@ -1312,7 +1290,6 @@ struct ice_aqc_query_txsched_res { __le32 addr_low; }; - struct ice_aqc_generic_sched_props { __le16 phys_levels; __le16 logical_levels; @@ -1324,7 +1301,6 @@ struct ice_aqc_generic_sched_props { u8 rsvd1[22]; }; - struct ice_aqc_layer_props { u8 logical_layer; u8 chunk_size; @@ -1338,13 +1314,11 @@ struct ice_aqc_layer_props { u8 rsvd1[14]; }; - struct ice_aqc_query_txsched_res_resp { struct ice_aqc_generic_sched_props sched_props; struct ice_aqc_layer_props layer_props[ICE_AQC_TOPO_MAX_LEVEL_NUM]; }; - /* Query Node to Root Topology (indirect 0x0413) * This command uses ice_aqc_get_elem as its data buffer. */ @@ -1355,7 +1329,6 @@ struct ice_aqc_query_node_to_root { __le32 addr_low; }; - /* Get PHY capabilities (indirect 0x0600) */ struct ice_aqc_get_phy_caps { u8 lport_num; @@ -1364,10 +1337,11 @@ struct ice_aqc_get_phy_caps { /* 18.0 - Report qualified modules */ #define ICE_AQC_GET_PHY_RQM BIT(0) /* 18.1 - 18.3 : Report mode - * 000b - Report NVM capabilities - * 001b - Report topology capabilities - * 010b - Report SW configured - * 100b - Report default capabilities + * 000b - Report topology capabilities, without media + * 001b - Report topology capabilities, with media + * 010b - Report Active configuration + * 011b - Report PHY Type and FEC mode capabilities + * 100b - Report Default capabilities */ #define ICE_AQC_REPORT_MODE_S 1 #define ICE_AQC_REPORT_MODE_M (7 << ICE_AQC_REPORT_MODE_S) @@ -1380,7 +1354,6 @@ struct ice_aqc_get_phy_caps { __le32 addr_low; }; - /* This is #define of PHY type (Extended): * The first set of defines is for phy_type_low. 
*/ @@ -1455,7 +1428,7 @@ struct ice_aqc_get_phy_caps { #define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2) #define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3) #define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4) -#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5 +#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4 struct ice_aqc_get_phy_caps_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ @@ -1496,6 +1469,7 @@ struct ice_aqc_get_phy_caps_data { #define ICE_AQC_PHY_FEC_25G_RS_528_REQ BIT(2) #define ICE_AQC_PHY_FEC_25G_KR_REQ BIT(3) #define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4) +#define ICE_AQC_PHY_FEC_DIS BIT(5) #define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6) #define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7) #define ICE_AQC_PHY_FEC_MASK ICE_M(0xdf, 0) @@ -1527,7 +1501,6 @@ struct ice_aqc_get_phy_caps_data { } qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX]; }; - /* Set PHY capabilities (direct 0x0601) * NOTE: This command must be followed by setup link and restart auto-neg */ @@ -1538,7 +1511,6 @@ struct ice_aqc_set_phy_cfg { __le32 addr_low; }; - /* Set PHY config command data structure */ struct ice_aqc_set_phy_cfg_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ @@ -1559,7 +1531,6 @@ struct ice_aqc_set_phy_cfg_data { u8 module_compliance_enforcement; }; - /* Set MAC Config command data structure (direct 0x0603) */ struct ice_aqc_set_mac_cfg { __le16 max_frame_size; @@ -1579,7 +1550,6 @@ struct ice_aqc_set_mac_cfg { u8 reserved[7]; }; - /* Restart AN command data structure (direct 0x0605) * Also used for response, with only the lport_num field present. */ @@ -1592,7 +1562,6 @@ struct ice_aqc_restart_an { u8 reserved2[13]; }; - /* Get link status (indirect 0x0607), also used for Link Status Event */ struct ice_aqc_get_link_status { u8 lport_num; @@ -1609,6 +1578,11 @@ struct ice_aqc_get_link_status { __le32 addr_low; }; +enum ice_get_link_status_data_version { + ICE_GET_LINK_STATUS_DATA_V1 = 1, +}; + +#define ICE_GET_LINK_STATUS_DATALEN_V1 32 /* Get link status response data structure, also used for Link Status Event */ struct ice_aqc_get_link_status_data { @@ -1701,7 +1675,6 @@ struct ice_aqc_get_link_status_data { __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ }; - /* Set event mask command (direct 0x0613) */ struct ice_aqc_set_event_mask { u8 lport_num; @@ -1722,7 +1695,6 @@ struct ice_aqc_set_event_mask { u8 reserved1[6]; }; - /* Set PHY Loopback command (direct 0x0619) */ struct ice_aqc_set_phy_lb { u8 lport_num; @@ -1740,7 +1712,6 @@ struct ice_aqc_set_phy_lb { u8 reserved2[12]; }; - /* Set MAC Loopback command (direct 0x0620) */ struct ice_aqc_set_mac_lb { u8 lb_mode; @@ -1749,8 +1720,31 @@ struct ice_aqc_set_mac_lb { u8 reserved[15]; }; +/* Set PHY recovered clock output (direct 0x0630) */ +struct ice_aqc_set_phy_rec_clk_out { + u8 phy_output; + u8 port_num; +#define ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT 0xFF + u8 flags; +#define ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN BIT(0) + u8 rsvd; + __le32 freq; + u8 rsvd2[6]; + __le16 node_handle; +}; - +/* Get PHY recovered clock output (direct 0x0631) */ +struct ice_aqc_get_phy_rec_clk_out { + u8 phy_output; + u8 port_num; +#define ICE_AQC_GET_PHY_REC_CLK_OUT_CURR_PORT 0xFF + u8 flags; +#define ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN BIT(0) + u8 rsvd; + __le32 freq; + u8 rsvd2[6]; + __le16 node_handle; +}; /* DNL Get Status command (indirect 0x0680) * Structure used for the response, the command uses the generic @@ -1783,7 +1777,6 @@ struct ice_aqc_dnl_get_status { __le32 addr_low; }; - struct ice_aqc_dnl_get_status_data { __le16 
activity_err_code; __le16 act_err_code; @@ -1813,7 +1806,6 @@ struct ice_aqc_dnl_get_status_data { u32 sb_iosf_clk_cntr; }; - /* DNL run command (direct 0x0681) */ struct ice_aqc_dnl_run_command { u8 reserved0; @@ -1833,7 +1825,6 @@ struct ice_aqc_dnl_run_command { u8 reserved1[12]; }; - /* DNL call command (indirect 0x0682) * Struct is used for both command and response */ @@ -1846,13 +1837,11 @@ struct ice_aqc_dnl_call_command { __le32 addr_low; }; - /* DNL call command/response buffer (indirect 0x0682) */ struct ice_aqc_dnl_call { __le32 stores[4]; }; - /* Used for both commands: * DNL read sto command (indirect 0x0683) * DNL write sto command (indirect 0x0684) @@ -1869,7 +1858,6 @@ struct ice_aqc_dnl_read_write_command { __le32 addr_low; /* Used for read sto only */ }; - /* Used for both command responses: * DNL read sto response (indirect 0x0683) * DNL write sto response (indirect 0x0684) @@ -1883,7 +1871,6 @@ struct ice_aqc_dnl_read_write_response { __le32 addr_low; /* Reserved for write command */ }; - /* DNL set breakpoints command (indirect 0x0686) */ struct ice_aqc_dnl_set_breakpoints_command { __le32 reserved[2]; @@ -1891,7 +1878,6 @@ struct ice_aqc_dnl_set_breakpoints_command { __le32 addr_low; }; - /* DNL set breakpoints data buffer structure (indirect 0x0686) */ struct ice_aqc_dnl_set_breakpoints { u8 ctx; @@ -1900,7 +1886,6 @@ struct ice_aqc_dnl_set_breakpoints { __le16 activity_id; }; - /* DNL read log data command(indirect 0x0687) */ struct ice_aqc_dnl_read_log_command { __le16 reserved0; @@ -1911,7 +1896,6 @@ struct ice_aqc_dnl_read_log_command { }; - /* DNL read log data response(indirect 0x0687) */ struct ice_aqc_dnl_read_log_response { __le16 reserved; @@ -1922,7 +1906,6 @@ struct ice_aqc_dnl_read_log_response { }; - struct ice_aqc_link_topo_params { u8 lport_num; u8 lport_num_valid; @@ -1939,6 +1922,9 @@ struct ice_aqc_link_topo_params { #define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6 #define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7 #define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL 9 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX 10 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPS 11 #define ICE_AQC_LINK_TOPO_NODE_CTX_S 4 #define ICE_AQC_LINK_TOPO_NODE_CTX_M \ (0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S) @@ -1951,7 +1937,6 @@ struct ice_aqc_link_topo_params { u8 index; }; - struct ice_aqc_link_topo_addr { struct ice_aqc_link_topo_params topo_params; __le16 handle; @@ -1972,16 +1957,20 @@ struct ice_aqc_link_topo_addr { (0x1FF << ICE_AQC_LINK_TOPO_HANDLE_NODE_S) }; - /* Get Link Topology Handle (direct, 0x06E0) */ struct ice_aqc_get_link_topo { struct ice_aqc_link_topo_addr addr; u8 node_part_num; -#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_PCA9575 0x21 +#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_PCA9575 0x21 +#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24 +#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_SI5383_5384 0x25 +#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_E822_PHY 0x30 +#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_C827 0x31 +#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47 +#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48 u8 rsvd[9]; }; - /* Get Link Topology Pin (direct, 0x06E1) */ struct ice_aqc_get_link_topo_pin { struct ice_aqc_link_topo_addr addr; @@ -2053,13 +2042,11 @@ struct ice_aqc_i2c { u8 i2c_data[4]; /* Used only by write command, reserved in read. 
*/ }; - /* Read I2C Response (direct, 0x06E2) */ struct ice_aqc_read_i2c_resp { u8 i2c_data[16]; }; - /* Read/Write MDIO (direct, 0x06E4/0x06E5) */ struct ice_aqc_mdio { struct ice_aqc_link_topo_addr topo_addr; @@ -2076,7 +2063,6 @@ struct ice_aqc_mdio { u8 rsvd1[4]; }; - /* Set/Get GPIO By Function (direct, 0x06E6/0x06E7) */ struct ice_aqc_gpio_by_func { struct ice_aqc_link_topo_addr topo_addr; @@ -2089,7 +2075,6 @@ struct ice_aqc_gpio_by_func { u8 rsvd[8]; }; - /* Set LED (direct, 0x06E8) */ struct ice_aqc_set_led { struct ice_aqc_link_topo_addr topo_addr; @@ -2115,7 +2100,6 @@ struct ice_aqc_set_led { u8 rsvd[9]; }; - /* Set Port Identification LED (direct, 0x06E9) */ struct ice_aqc_set_port_id_led { u8 lport_num; @@ -2127,7 +2111,6 @@ struct ice_aqc_set_port_id_led { u8 rsvd[13]; }; - /* Get Port Options (indirect, 0x06EA) */ struct ice_aqc_get_port_options { u8 lport_num; @@ -2151,7 +2134,6 @@ struct ice_aqc_get_port_options { __le32 addr_low; }; - struct ice_aqc_get_port_options_elem { u8 pmd; #define ICE_AQC_PORT_INV_PORT_OPT 4 @@ -2175,7 +2157,6 @@ struct ice_aqc_get_port_options_elem { u8 pf2port_cid[2]; }; - /* Set Port Option (direct, 0x06EB) */ struct ice_aqc_set_port_option { u8 lport_num; @@ -2185,7 +2166,6 @@ struct ice_aqc_set_port_option { u8 rsvd[13]; }; - /* Set/Get GPIO (direct, 0x06EC/0x06ED) */ struct ice_aqc_gpio { __le16 gpio_ctrl_handle; @@ -2196,7 +2176,6 @@ struct ice_aqc_gpio { u8 rsvd[12]; }; - /* Read/Write SFF EEPROM command (indirect 0x06EE) */ struct ice_aqc_sff_eeprom { u8 lport_num; @@ -2224,7 +2203,6 @@ struct ice_aqc_sff_eeprom { __le32 addr_low; }; - /* SW Set GPIO command (indirect 0x6EF) * SW Get GPIO command (indirect 0x6F0) */ @@ -2241,14 +2219,12 @@ struct ice_aqc_sw_gpio { u8 rsvd[12]; }; - /* Program Topology Device NVM (direct, 0x06F2) */ struct ice_aqc_prog_topo_dev_nvm { struct ice_aqc_link_topo_params topo_params; u8 rsvd[12]; }; - /* Read Topology Device NVM (direct, 0x06F3) */ struct ice_aqc_read_topo_dev_nvm { struct ice_aqc_link_topo_params topo_params; @@ -2257,7 +2233,6 @@ struct ice_aqc_read_topo_dev_nvm { u8 data_read[ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE]; }; - /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Write commands (indirect 0x0703) @@ -2289,6 +2264,12 @@ struct ice_aqc_nvm { #define ICE_AQC_NVM_PERST_FLAG 1 #define ICE_AQC_NVM_EMPR_FLAG 2 #define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */ + /* For Write Activate, several flags are sent as part of a separate + * flags2 field using a separate byte. For simplicity of the software + * interface, we pass the flags as a 16 bit value so these flags are + * all offset by 8 bits + */ +#define ICE_AQC_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */ __le16 module_typeid; __le16 length; #define ICE_AQC_NVM_ERASE_LEN 0xFFFF @@ -2319,7 +2300,7 @@ struct ice_aqc_nvm { #define ICE_AQC_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */ #define ICE_AQC_NVM_MINSREV_MOD_ID 0x130 - +#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B /* Used for reading and writing MinSRev using 0x0701 and 0x0703. 
Note that the * type field is excluded from the section when reading and writing from @@ -2336,6 +2317,12 @@ struct ice_aqc_nvm_minsrev { __le16 orom_minsrev_h; }; +struct ice_aqc_nvm_tx_topo_user_sel { + __le16 length; + u8 data; +#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4) + u8 reserved; +}; /* Used for 0x0704 as well as for 0x0705 commands */ struct ice_aqc_nvm_cfg { @@ -2351,14 +2338,12 @@ struct ice_aqc_nvm_cfg { __le32 addr_low; }; - struct ice_aqc_nvm_cfg_data { __le16 field_id; __le16 field_options; __le16 field_value; }; - /* NVM Checksum Command (direct, 0x0706) */ struct ice_aqc_nvm_checksum { u8 flags; @@ -2370,7 +2355,6 @@ struct ice_aqc_nvm_checksum { u8 rsvd2[12]; }; - /* Used for NVM Set Package Data command - 0x070A */ struct ice_aqc_nvm_pkg_data { u8 reserved[3]; @@ -2383,7 +2367,6 @@ struct ice_aqc_nvm_pkg_data { __le32 addr_low; }; - /* Used for Pass Component Table command - 0x070B */ struct ice_aqc_nvm_pass_comp_tbl { u8 component_response; /* Response only */ @@ -2414,7 +2397,6 @@ struct ice_aqc_nvm_pass_comp_tbl { __le32 addr_low; }; - struct ice_aqc_nvm_comp_tbl { __le16 comp_class; #define NVM_COMP_CLASS_ALL_FW 0x000A @@ -2435,7 +2417,6 @@ struct ice_aqc_nvm_comp_tbl { u8 cvs[]; /* Component Version String */ } __packed; - /* * Send to PF command (indirect 0x0801) ID is only used by PF * @@ -2449,8 +2430,6 @@ struct ice_aqc_pf_vf_msg { __le32 addr_low; }; - - /* Get LLDP MIB (indirect 0x0A00) * Note: This is also used by the LLDP MIB Change Event (0x0A01) * as the format is the same. @@ -2472,29 +2451,43 @@ struct ice_aqc_lldp_get_mib { #define ICE_AQ_LLDP_TX_ACTIVE 0 #define ICE_AQ_LLDP_TX_SUSPENDED 1 #define ICE_AQ_LLDP_TX_FLUSHED 3 +/* DCBX mode */ +#define ICE_AQ_LLDP_DCBX_S 6 +#define ICE_AQ_LLDP_DCBX_M (0x3 << ICE_AQ_LLDP_DCBX_S) +#define ICE_AQ_LLDP_DCBX_NA 0 +#define ICE_AQ_LLDP_DCBX_CEE 1 +#define ICE_AQ_LLDP_DCBX_IEEE 2 /* The following bytes are reserved for the Get LLDP MIB command (0x0A00) * and in the LLDP MIB Change Event (0x0A01). They are valid for the * Get LLDP MIB (0x0A00) response only. 
*/ - u8 reserved1; + u8 state; +#define ICE_AQ_LLDP_MIB_CHANGE_STATE_S 0 +#define ICE_AQ_LLDP_MIB_CHANGE_STATE_M \ + (0x1 << ICE_AQ_LLDP_MIB_CHANGE_STATE_S) +#define ICE_AQ_LLDP_MIB_CHANGE_EXECUTED 0 +#define ICE_AQ_LLDP_MIB_CHANGE_PENDING 1 __le16 local_len; __le16 remote_len; - u8 reserved2[2]; + u8 reserved[2]; __le32 addr_high; __le32 addr_low; }; - /* Configure LLDP MIB Change Event (direct 0x0A01) */ /* For MIB Change Event use ice_aqc_lldp_get_mib structure above */ struct ice_aqc_lldp_set_mib_change { u8 command; #define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 #define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1 +#define ICE_AQ_LLDP_MIB_PENDING_S 1 +#define ICE_AQ_LLDP_MIB_PENDING_M \ + (0x1 << ICE_AQ_LLDP_MIB_PENDING_S) +#define ICE_AQ_LLDP_MIB_PENDING_DISABLE 0 +#define ICE_AQ_LLDP_MIB_PENDING_ENABLE 1 u8 reserved[15]; }; - /* Add LLDP TLV (indirect 0x0A02) * Delete LLDP TLV (indirect 0x0A04) */ @@ -2507,7 +2500,6 @@ struct ice_aqc_lldp_add_delete_tlv { __le32 addr_low; }; - /* Update LLDP TLV (indirect 0x0A03) */ struct ice_aqc_lldp_update_tlv { u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ @@ -2519,7 +2511,6 @@ struct ice_aqc_lldp_update_tlv { __le32 addr_low; }; - /* Stop LLDP (direct 0x0A05) */ struct ice_aqc_lldp_stop { u8 command; @@ -2530,7 +2521,6 @@ struct ice_aqc_lldp_stop { u8 reserved[15]; }; - /* Start LLDP (direct 0x0A06) */ struct ice_aqc_lldp_start { u8 command; @@ -2539,7 +2529,6 @@ struct ice_aqc_lldp_start { u8 reserved[15]; }; - /* Get CEE DCBX Oper Config (0x0A07) * The command uses the generic descriptor struct and * returns the struct below as an indirect response. @@ -2570,7 +2559,6 @@ struct ice_aqc_get_cee_dcb_cfg_resp { u8 reserved[12]; }; - /* Set Local LLDP MIB (indirect 0x0A08) * Used to replace the local MIB of a given LLDP agent. e.g. DCBX */ @@ -2588,7 +2576,6 @@ struct ice_aqc_lldp_set_local_mib { __le32 addr_low; }; - struct ice_aqc_lldp_set_local_mib_resp { u8 status; #define SET_LOCAL_MIB_RESP_EVENT_M BIT(0) @@ -2597,7 +2584,6 @@ struct ice_aqc_lldp_set_local_mib_resp { u8 reserved[15]; }; - /* Stop/Start LLDP Agent (direct 0x0A09) * Used for stopping/starting specific LLDP agent. e.g. DCBX. * The same structure is used for the response, with the command field @@ -2611,7 +2597,6 @@ struct ice_aqc_lldp_stop_start_specific_agent { u8 reserved[15]; }; - /* LLDP Filter Control (direct 0x0A0A) */ struct ice_aqc_lldp_filter_ctrl { u8 cmd_flags; @@ -2624,7 +2609,6 @@ struct ice_aqc_lldp_filter_ctrl { u8 reserved2[12]; }; - /* Get/Set RSS key (indirect 0x0B04/0x0B02) */ struct ice_aqc_get_set_rss_key { #define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15) @@ -2636,7 +2620,6 @@ struct ice_aqc_get_set_rss_key { __le32 addr_low; }; - #define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28 #define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC #define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \ @@ -2658,7 +2641,6 @@ struct ice_aqc_get_set_rss_keys { u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE]; }; - /* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */ struct ice_aqc_get_set_rss_lut { #define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15) @@ -2694,7 +2676,6 @@ struct ice_aqc_get_set_rss_lut { __le32 addr_low; }; - /* Clear FD Table Command (direct, 0x0B06) */ struct ice_aqc_clear_fd_table { u8 clear_type; @@ -2705,7 +2686,6 @@ struct ice_aqc_clear_fd_table { u8 reserved[12]; }; - /* Sideband Control Interface Commands */ /* Neighbor Device Request (indirect 0x0C00); also used for the response. 
*/ struct ice_aqc_neigh_dev_req { @@ -2715,7 +2695,6 @@ struct ice_aqc_neigh_dev_req { __le32 addr_low; }; - /* Allocate ACL table (indirect 0x0C10) */ #define ICE_AQC_ACL_KEY_WIDTH 40 #define ICE_AQC_ACL_KEY_WIDTH_BYTES 5 @@ -2751,7 +2730,6 @@ struct ice_aqc_acl_alloc_table { __le32 addr_low; }; - /* Allocate ACL table command buffer format */ struct ice_aqc_acl_alloc_table_data { /* Dependent table AllocIDs. Each word in this 15 word array specifies @@ -2762,7 +2740,6 @@ struct ice_aqc_acl_alloc_table_data { __le16 alloc_ids[ICE_AQC_MAX_CONCURRENT_ACL_TBL]; }; - /* Deallocate ACL table (indirect 0x0C11) * Allocate ACL action-pair (indirect 0x0C12) * Deallocate ACL action-pair (indirect 0x0C13) @@ -2779,7 +2756,6 @@ struct ice_aqc_acl_tbl_actpair { __le32 addr_low; }; - /* This response structure is same in case of alloc/dealloc table, * alloc/dealloc action-pair */ @@ -2825,7 +2801,6 @@ struct ice_aqc_acl_generic { u8 act_mem[ICE_AQC_MAX_ACTION_MEMORIES]; }; - /* Allocate ACL scenario (indirect 0x0C14). This command doesn't have separate * response buffer since original command buffer gets updated with * 'scen_id' in case of success @@ -2844,7 +2819,6 @@ struct ice_aqc_acl_alloc_scen { __le32 addr_low; }; - /* De-allocate ACL scenario (direct 0x0C15). This command doesn't need * separate response buffer since nothing to be returned as a response * except status. @@ -2854,7 +2828,6 @@ struct ice_aqc_acl_dealloc_scen { u8 reserved[14]; }; - /* Update ACL scenario (direct 0x0C1B) * Query ACL scenario (direct 0x0C23) */ @@ -2865,7 +2838,6 @@ struct ice_aqc_acl_update_query_scen { __le32 addr_low; }; - /* Input buffer format in case allocate/update ACL scenario and same format * is used for response buffer in case of query ACL scenario. * NOTE: de-allocate ACL scenario is direct command and doesn't require @@ -2906,7 +2878,6 @@ struct ice_aqc_acl_scen { u8 act_mem_cfg[ICE_AQC_MAX_ACTION_MEMORIES]; }; - /* Allocate ACL counters (indirect 0x0C16) */ struct ice_aqc_acl_alloc_counters { /* Amount of contiguous counters requested. Min value is 1 and @@ -2958,7 +2929,6 @@ struct ice_aqc_acl_alloc_counters { } ops; }; - /* De-allocate ACL counters (direct 0x0C17) */ struct ice_aqc_acl_dealloc_counters { /* first counter being released */ @@ -2972,7 +2942,6 @@ struct ice_aqc_acl_dealloc_counters { u8 reserved[10]; }; - /* De-allocate ACL resources (direct 0x0C1A). 
Used by SW to release all the * resources allocated for it using a single command */ @@ -2980,7 +2949,6 @@ struct ice_aqc_acl_dealloc_res { u8 reserved[16]; }; - /* Program ACL actionpair (indirect 0x0C1C) * Query ACL actionpair (indirect 0x0C25) */ @@ -2995,7 +2963,6 @@ struct ice_aqc_acl_actpair { __le32 addr_low; }; - /* Input buffer format for program/query action-pair admin command */ struct ice_acl_act_entry { /* Action priority, values must be between 0..7 */ @@ -3014,13 +2981,11 @@ struct ice_acl_act_entry { __le16 value; }; - #define ICE_ACL_NUM_ACT_PER_ACT_PAIR 2 struct ice_aqc_actpair { struct ice_acl_act_entry act[ICE_ACL_NUM_ACT_PER_ACT_PAIR]; }; - /* Generic format used to describe either input or response buffer * for admin commands related to ACL profile */ @@ -3061,7 +3026,6 @@ struct ice_aqc_acl_prof_generic_frmt { u8 pf_scenario_num[ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS]; }; - /* Program ACL profile extraction (indirect 0x0C1D) * Program ACL profile ranges (indirect 0x0C1E) * Query ACL profile (indirect 0x0C21) @@ -3074,7 +3038,6 @@ struct ice_aqc_acl_profile { __le32 addr_low; }; - /* Input buffer format for program profile extraction admin command and * response buffer format for query profile admin command is as defined * in struct ice_aqc_acl_prof_generic_frmt @@ -3099,13 +3062,11 @@ struct ice_acl_rng_data { __be16 mask; }; - struct ice_aqc_acl_profile_ranges { #define ICE_AQC_ACL_PROF_RANGES_NUM_CFG 8 struct ice_acl_rng_data checker_cfg[ICE_AQC_ACL_PROF_RANGES_NUM_CFG]; }; - /* Program ACL entry (indirect 0x0C20) * Query ACL entry (indirect 0x0C24) */ @@ -3118,7 +3079,6 @@ struct ice_aqc_acl_entry { __le32 addr_low; }; - /* Input buffer format in case of program ACL entry and response buffer format * in case of query ACL entry */ @@ -3133,7 +3093,6 @@ struct ice_aqc_acl_data { } entry_key, entry_key_invert; }; - /* Query ACL counter (direct 0x0C27) */ struct ice_aqc_acl_query_counter { /* Queried counter index */ @@ -3152,7 +3111,6 @@ struct ice_aqc_acl_query_counter { } ops; }; - /* Add Tx LAN Queues (indirect 0x0C30) */ struct ice_aqc_add_txqs { u8 num_qgrps; @@ -3162,7 +3120,6 @@ struct ice_aqc_add_txqs { __le32 addr_low; }; - /* This is the descriptor of each queue entry for the Add Tx LAN Queues * command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp. */ @@ -3175,7 +3132,6 @@ struct ice_aqc_add_txqs_perq { struct ice_aqc_txsched_elem info; }; - /* The format of the command buffer for Add Tx LAN Queues (0x0C30) * is an array of the following structs. Please note that the length of * each struct ice_aqc_add_tx_qgrp is variable due @@ -3188,7 +3144,6 @@ struct ice_aqc_add_tx_qgrp { struct ice_aqc_add_txqs_perq txqs[]; }; - /* Disable Tx LAN Queues (indirect 0x0C31) */ struct ice_aqc_dis_txqs { u8 cmd_type; @@ -3211,7 +3166,6 @@ struct ice_aqc_dis_txqs { __le32 addr_low; }; - /* The buffer for Disable Tx LAN Queues (indirect 0x0C31) * contains the following structures, arrayed one after the * other. 
@@ -3233,8 +3187,6 @@ struct ice_aqc_dis_txq_item { __le16 q_id[]; } __packed; - - /* Tx LAN Queues Cleanup Event (0x0C31) */ struct ice_aqc_txqs_cleanup { __le16 caller_opc; @@ -3242,7 +3194,6 @@ struct ice_aqc_txqs_cleanup { u8 reserved[12]; }; - /* Move / Reconfigure Tx Queues (indirect 0x0C32) */ struct ice_aqc_move_txqs { u8 cmd_type; @@ -3263,7 +3214,6 @@ struct ice_aqc_move_txqs { __le32 addr_low; }; - /* Per-queue data buffer for the Move Tx LAN Queues command/response */ struct ice_aqc_move_txqs_elem { __le16 txq_id; @@ -3272,7 +3222,6 @@ struct ice_aqc_move_txqs_elem { __le32 q_teid; }; - /* Indirect data buffer for the Move Tx LAN Queues command/response */ struct ice_aqc_move_txqs_data { __le32 src_teid; @@ -3280,7 +3229,6 @@ struct ice_aqc_move_txqs_data { struct ice_aqc_move_txqs_elem txqs[]; }; - /* Add Tx RDMA Queue Set (indirect 0x0C33) */ struct ice_aqc_add_rdma_qset { u8 num_qset_grps; @@ -3289,7 +3237,6 @@ struct ice_aqc_add_rdma_qset { __le32 addr_low; }; - /* This is the descriptor of each qset entry for the Add Tx RDMA Queue Set * command (0x0C33). Only used within struct ice_aqc_add_rdma_qset. */ @@ -3300,7 +3247,6 @@ struct ice_aqc_add_tx_rdma_qset_entry { struct ice_aqc_txsched_elem info; }; - /* The format of the command buffer for Add Tx RDMA Queue Set(0x0C33) * is an array of the following structs. Please note that the length of * each struct ice_aqc_add_rdma_qset is variable due to the variable @@ -3313,32 +3259,30 @@ struct ice_aqc_add_rdma_qset_data { struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[]; }; - /* Move RDMA Queue Set (indirect 0x0C34) */ struct ice_aqc_move_rdma_qset_cmd { u8 num_rdma_qset; /* Used by commands and response */ +#define ICE_AQC_PF_MODE_SAME_PF 0x0 +#define ICE_AQC_PF_MODE_GIVE_OWNERSHIP 0x1 +#define ICE_AQC_PF_MODE_KEEP_OWNERSHIP 0x2 u8 flags; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; - /* Buffer */ struct ice_aqc_move_rdma_qset_buffer_desc { __le16 tx_qset_id; __le16 qset_teid; }; - struct ice_aqc_move_rdma_qset_buffer { __le32 src_parent_teid; __le32 dest_parent_teid; struct ice_aqc_move_rdma_qset_buffer_desc descs[]; }; - - /* Download Package (indirect 0x0C40) */ /* Also used for Update Package (indirect 0x0C42 and 0x0C41) */ struct ice_aqc_download_pkg { @@ -3350,7 +3294,6 @@ struct ice_aqc_download_pkg { __le32 addr_low; }; - struct ice_aqc_download_pkg_resp { __le32 error_offset; __le32 error_info; @@ -3358,7 +3301,6 @@ struct ice_aqc_download_pkg_resp { __le32 addr_low; }; - /* Get Package Info List (indirect 0x0C43) */ struct ice_aqc_get_pkg_info_list { __le32 reserved1; @@ -3367,7 +3309,6 @@ struct ice_aqc_get_pkg_info_list { __le32 addr_low; }; - /* Version format for packages */ struct ice_pkg_ver { u8 major; @@ -3376,7 +3317,6 @@ struct ice_pkg_ver { u8 draft; }; - #define ICE_PKG_NAME_SIZE 32 #define ICE_SEG_ID_SIZE 28 #define ICE_SEG_NAME_SIZE 28 @@ -3391,22 +3331,248 @@ struct ice_aqc_get_pkg_info { u8 is_modified; }; - /* Get Package Info List response buffer format (0x0C43) */ struct ice_aqc_get_pkg_info_resp { __le32 count; struct ice_aqc_get_pkg_info pkg_info[]; }; +/* Configure CGU Error Reporting (direct, 0x0C60) */ +struct ice_aqc_cfg_cgu_err { + u8 cmd; +#define ICE_AQC_CFG_CGU_EVENT_SHIFT 0 +#define ICE_AQC_CFG_CGU_EVENT_MASK BIT(ICE_AQC_CFG_CGU_EVENT_SHIFT) +#define ICE_AQC_CFG_CGU_EVENT_EN (0 << ICE_AQC_CFG_CGU_EVENT_SHIFT) +#define ICE_AQC_CFG_CGU_EVENT_DIS ICE_AQC_CFG_CGU_EVENT_MASK +#define ICE_AQC_CFG_CGU_ERR_SHIFT 1 +#define ICE_AQC_CFG_CGU_ERR_MASK BIT(ICE_AQC_CFG_CGU_ERR_SHIFT) 
+#define ICE_AQC_CFG_CGU_ERR_EN (0 << ICE_AQC_CFG_CGU_ERR_SHIFT) +#define ICE_AQC_CFG_CGU_ERR_DIS ICE_AQC_CFG_CGU_ERR_MASK + u8 rsvd[15]; +}; +/* CGU Error Event (direct, 0x0C60) */ +struct ice_aqc_event_cgu_err { + u8 err_type; +#define ICE_AQC_CGU_ERR_SYNCE_LOCK_LOSS BIT(0) +#define ICE_AQC_CGU_ERR_HOLDOVER_CHNG BIT(1) +#define ICE_AQC_CGU_ERR_TIMESYNC_LOCK_LOSS BIT(2) + u8 rsvd[15]; +}; +/* Get CGU abilities command response data structure (indirect 0x0C61) */ +struct ice_aqc_get_cgu_abilities { + u8 num_inputs; + u8 num_outputs; + u8 pps_dpll_idx; + u8 synce_dpll_idx; + __le32 max_in_freq; + __le32 max_in_phase_adj; + __le32 max_out_freq; + __le32 max_out_phase_adj; + u8 cgu_part_num; + u8 rsvd[3]; +}; + +#define ICE_AQC_NODE_HANDLE_VALID BIT(10) +#define ICE_AQC_NODE_HANDLE ICE_M(0x3FF, 0) +#define ICE_AQC_DRIVING_CLK_NUM_SHIFT 10 +#define ICE_AQC_DRIVING_CLK_NUM ICE_M(0x3F, ICE_AQC_DRIVING_CLK_NUM_SHIFT) + +/* Set CGU input config (direct 0x0C62) */ +struct ice_aqc_set_cgu_input_config { + u8 input_idx; + u8 flags1; +#define ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ BIT(6) +#define ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY BIT(7) + u8 flags2; +#define ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5) +#define ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6) + u8 rsvd; + __le32 freq; + __le32 phase_delay; + u8 rsvd2[2]; + __le16 node_handle; +}; + +/* Get CGU input config response descriptor structure (direct 0x0C63) */ +struct ice_aqc_get_cgu_input_config { + u8 input_idx; + u8 status; +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_LOS BIT(0) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_SCM_FAIL BIT(1) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_CFM_FAIL BIT(2) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_GST_FAIL BIT(3) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_PFM_FAIL BIT(4) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL BIT(6) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_CAP BIT(7) + u8 type; +#define ICE_AQC_GET_CGU_IN_CFG_TYPE_READ_ONLY BIT(0) +#define ICE_AQC_GET_CGU_IN_CFG_TYPE_GPS BIT(4) +#define ICE_AQC_GET_CGU_IN_CFG_TYPE_EXTERNAL BIT(5) +#define ICE_AQC_GET_CGU_IN_CFG_TYPE_PHY BIT(6) + u8 flags1; +#define ICE_AQC_GET_CGU_IN_CFG_FLG1_PHASE_DELAY_SUPP BIT(0) +#define ICE_AQC_GET_CGU_IN_CFG_FLG1_1PPS_SUPP BIT(2) +#define ICE_AQC_GET_CGU_IN_CFG_FLG1_10MHZ_SUPP BIT(3) +#define ICE_AQC_GET_CGU_IN_CFG_FLG1_ANYFREQ BIT(7) + __le32 freq; + __le32 phase_delay; + u8 flags2; +#define ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5) +#define ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6) + u8 rsvd[1]; + __le16 node_handle; +}; + +/* Set CGU output config (direct 0x0C64) */ +struct ice_aqc_set_cgu_output_config { + u8 output_idx; + u8 flags; +#define ICE_AQC_SET_CGU_OUT_CFG_OUT_EN BIT(0) +#define ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN BIT(1) +#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_FREQ BIT(2) +#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_PHASE BIT(3) +#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL BIT(4) + u8 src_sel; +#define ICE_AQC_SET_CGU_OUT_CFG_DPLL_SRC_SEL ICE_M(0x1F, 0) + u8 rsvd; + __le32 freq; + __le32 phase_delay; + u8 rsvd2[2]; + __le16 node_handle; +}; + +/* Get CGU output config (direct 0x0C65) */ +struct ice_aqc_get_cgu_output_config { + u8 output_idx; + u8 flags; +#define ICE_AQC_GET_CGU_OUT_CFG_OUT_EN BIT(0) +#define ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN BIT(1) +#define ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY BIT(2) + u8 src_sel; +#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT 0 +#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL \ + ICE_M(0x1F, ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT) +#define 
ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT 5 +#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE \ + ICE_M(0x7, ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT) + u8 rsvd; + __le32 freq; + __le32 src_freq; + u8 rsvd2[2]; + __le16 node_handle; +}; + +/* Get CGU DPLL status (direct 0x0C66) */ +struct ice_aqc_get_cgu_dpll_status { + u8 dpll_num; + u8 ref_state; +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_LOS BIT(0) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_SCM BIT(1) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_CFM BIT(2) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_GST BIT(3) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_PFM BIT(4) +#define ICE_AQC_GET_CGU_DPLL_STATUS_FAST_LOCK_EN BIT(5) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_ESYNC BIT(6) + __le16 dpll_state; +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK BIT(0) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO BIT(1) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY BIT(2) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_FLHIT BIT(5) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_PSLHIT BIT(7) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT 8 +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SEL \ + ICE_M(0x1F, ICE_AQC_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT 13 +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_MODE \ + ICE_M(0x7, ICE_AQC_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT) + __le32 phase_offset_h; + __le32 phase_offset_l; + u8 eec_mode; +#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_1 0xA +#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_2 0xB +#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_UNKNOWN 0xF + u8 rsvd[1]; + __le16 node_handle; +}; + +/* Set CGU DPLL config (direct 0x0C67) */ +struct ice_aqc_set_cgu_dpll_config { + u8 dpll_num; + u8 ref_state; +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_LOS BIT(0) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_SCM BIT(1) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_CFM BIT(2) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_GST BIT(3) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_PFM BIT(4) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN BIT(5) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_ESYNC BIT(6) + u8 rsvd; + u8 config; +#define ICE_AQC_SET_CGU_DPLL_CONFIG_CLK_REF_SEL ICE_M(0x1F, 0) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE ICE_M(0x7, 5) + u8 rsvd2[8]; + u8 eec_mode; + u8 rsvd3[1]; + __le16 node_handle; +}; + +/* Set CGU reference priority (direct 0x0C68) */ +struct ice_aqc_set_cgu_ref_prio { + u8 dpll_num; + u8 ref_idx; + u8 ref_priority; + u8 rsvd[11]; + __le16 node_handle; +}; + +/* Get CGU reference priority (direct 0x0C69) */ +struct ice_aqc_get_cgu_ref_prio { + u8 dpll_num; + u8 ref_idx; + u8 ref_priority; /* Valid only in response */ + u8 rsvd[13]; +}; + +/* Get CGU info (direct 0x0C6A) */ +struct ice_aqc_get_cgu_info { + __le32 cgu_id; + __le32 cgu_cfg_ver; + __le32 cgu_fw_ver; + u8 node_part_num; + u8 dev_rev; + __le16 node_handle; +}; + +/* Read CGU register (direct 0x0C6E) */ +struct ice_aqc_read_cgu_reg { + __le16 offset; +#define ICE_AQC_READ_CGU_REG_MAX_DATA_LEN 16 + u8 data_len; + u8 rsvd[13]; +}; + +/* Read CGU register response (direct 0x0C6E) */ +struct ice_aqc_read_cgu_reg_resp { + u8 data[ICE_AQC_READ_CGU_REG_MAX_DATA_LEN]; +}; + +/* Write CGU register (direct 0x0C6F) */ +struct ice_aqc_write_cgu_reg { + __le16 offset; +#define ICE_AQC_WRITE_CGU_REG_MAX_DATA_LEN 7 + u8 data_len; + u8 data[ICE_AQC_WRITE_CGU_REG_MAX_DATA_LEN]; + u8 rsvd[6]; +}; /* Driver Shared Parameters (direct, 0x0C90) */ struct ice_aqc_driver_shared_params { u8 set_or_get_op; #define 
ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0) -#define ICE_AQC_DRIVER_PARAM_SET 0 -#define ICE_AQC_DRIVER_PARAM_GET 1 +#define ICE_AQC_DRIVER_PARAM_SET ((u8)0) +#define ICE_AQC_DRIVER_PARAM_GET ((u8)1) u8 param_indx; #define ICE_AQC_DRIVER_PARAM_MAX_IDX 15 u8 rsvd[2]; @@ -3420,12 +3586,13 @@ enum ice_aqc_driver_params { ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0 = 0, /* OS clock index for PTP timer Domain 1 */ ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1, + /* Request ID to recalibrate PHC logic */ + ICE_AQC_DRIVER_PARAM_PHC_RECALC, /* Add new parameters above */ ICE_AQC_DRIVER_PARAM_MAX = 16, }; - /* Lan Queue Overflow Event (direct, 0x1001) */ struct ice_aqc_event_lan_overflow { __le32 prtdcb_ruptq; @@ -3433,8 +3600,28 @@ struct ice_aqc_event_lan_overflow { u8 reserved[8]; }; - - +/* Debug Dump Internal Data (indirect 0xFF08) */ +struct ice_aqc_debug_dump_internals { + u8 cluster_id; +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_ACL 1 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3 +/* EMP_DRAM only dumpable in device debug mode */ +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM 4 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK 5 +/* AUX_REGS only dumpable in device debug mode */ +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS 6 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB 7 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG 9 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 21 + u8 reserved; + __le16 table_id; /* Used only for non-memory clusters */ + __le32 idx; /* In table entries for tables, in bytes for memory */ + __le32 addr_high; + __le32 addr_low; +}; enum ice_aqc_fw_logging_mod { ICE_AQC_FW_LOG_ID_GENERAL = 0, @@ -3472,8 +3659,6 @@ enum ice_aqc_fw_logging_mod { ICE_AQC_FW_LOG_ID_MAX, }; - - /* Set Health Status (direct 0xFF20) */ struct ice_aqc_set_health_status_config { u8 event_source; @@ -3483,7 +3668,6 @@ struct ice_aqc_set_health_status_config { u8 reserved[15]; }; - #define ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT 0x101 #define ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE 0x102 #define ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL 0x103 @@ -3492,6 +3676,7 @@ struct ice_aqc_set_health_status_config { #define ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT 0x106 #define ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED 0x107 #define ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT 0x108 +#define ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE 0x109 #define ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG 0x10B #define ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS 0x10C #define ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE 0x10D @@ -3513,7 +3698,16 @@ struct ice_aqc_set_health_status_config { #define ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH 0x504 #define ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT 0x505 #define ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT 0x506 +#define ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION 0x507 +#define ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION 0x508 #define ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB 0x509 +#define ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT 0x50A +#define ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET 0x50B +#define ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL 0x50C +#define ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL 0x50D +#define ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP 0x1000 +#define ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL 0x1001 +#define ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ 0x1002 /* Get Health Status codes (indirect 0xFF21) */ struct ice_aqc_get_supported_health_status_codes { @@ -3523,7 +3717,6 @@ struct ice_aqc_get_supported_health_status_codes 
{ __le32 addr_low; }; - /* Get Health Status (indirect 0xFF22) */ struct ice_aqc_get_health_status { __le16 health_status_count; @@ -3532,7 +3725,6 @@ struct ice_aqc_get_health_status { __le32 addr_low; }; - /* Get Health Status event buffer entry, (0xFF22) * repeated per reported health status */ @@ -3547,13 +3739,11 @@ struct ice_aqc_health_status_elem { __le32 internal_data2; }; - /* Clear Health Status (direct 0xFF23) */ struct ice_aqc_clear_health_status { __le32 reserved[4]; }; - /* Set FW Logging configuration (indirect 0xFF30) * Register for FW Logging (indirect 0xFF31) * Query FW Logging (indirect 0xFF32) @@ -3588,7 +3778,6 @@ struct ice_aqc_fw_log { __le32 addr_low; }; - /* Response Buffer for: * Set Firmware Logging Configuration (0xFF30) * Query FW Logging (0xFF32) @@ -3599,7 +3788,6 @@ struct ice_aqc_fw_log_cfg_resp { u8 rsvd0; }; - /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags @@ -3642,6 +3830,8 @@ struct ice_aq_desc { struct ice_aqc_get_phy_caps get_phy; struct ice_aqc_set_phy_cfg set_phy; struct ice_aqc_restart_an restart_an; + struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out; + struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out; struct ice_aqc_dnl_get_status get_status; struct ice_aqc_dnl_run_command dnl_run; struct ice_aqc_dnl_call_command dnl_call; @@ -3655,6 +3845,7 @@ struct ice_aq_desc { struct ice_aqc_mdio read_write_mdio; struct ice_aqc_gpio_by_func read_write_gpio_by_func; struct ice_aqc_gpio read_write_gpio; + struct ice_aqc_sw_gpio sw_read_write_gpio; struct ice_aqc_set_led set_led; struct ice_aqc_mdio read_mdio; struct ice_aqc_mdio write_mdio; @@ -3675,6 +3866,7 @@ struct ice_aq_desc { struct ice_aqc_cfg_l2_node_cgd cfg_l2_node_cgd; struct ice_aqc_query_port_ets port_ets; struct ice_aqc_rl_profile rl_profile; + struct ice_aqc_node_attr node_attr; struct ice_aqc_nvm nvm; struct ice_aqc_nvm_cfg nvm_cfg; struct ice_aqc_nvm_checksum nvm_checksum; @@ -3713,14 +3905,30 @@ struct ice_aq_desc { struct ice_aqc_dis_txqs dis_txqs; struct ice_aqc_move_txqs move_txqs; struct ice_aqc_add_rdma_qset add_rdma_qset; + struct ice_aqc_move_rdma_qset_cmd move_rdma_qset; struct ice_aqc_txqs_cleanup txqs_cleanup; struct ice_aqc_add_get_update_free_vsi vsi_cmd; struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; struct ice_aqc_get_vsi_resp get_vsi_resp; struct ice_aqc_download_pkg download_pkg; struct ice_aqc_get_pkg_info_list get_pkg_info_list; + struct ice_aqc_cfg_cgu_err config_cgu_err; + struct ice_aqc_event_cgu_err cgu_err; + struct ice_aqc_set_cgu_input_config set_cgu_input_config; + struct ice_aqc_get_cgu_input_config get_cgu_input_config; + struct ice_aqc_set_cgu_output_config set_cgu_output_config; + struct ice_aqc_get_cgu_output_config get_cgu_output_config; + struct ice_aqc_get_cgu_dpll_status get_cgu_dpll_status; + struct ice_aqc_set_cgu_dpll_config set_cgu_dpll_config; + struct ice_aqc_set_cgu_ref_prio set_cgu_ref_prio; + struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio; + struct ice_aqc_get_cgu_info get_cgu_info; + struct ice_aqc_read_cgu_reg read_cgu_reg; + struct ice_aqc_read_cgu_reg_resp read_cgu_reg_resp; + struct ice_aqc_write_cgu_reg write_cgu_reg; struct ice_aqc_driver_shared_params drv_shared_params; struct ice_aqc_fw_log fw_log; + struct ice_aqc_debug_dump_internals debug_dump; struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_get_res_alloc get_res; @@ -3739,10 +3947,10 @@ struct ice_aq_desc { struct ice_aqc_clear_health_status clear_health_status; 
struct ice_aqc_prog_topo_dev_nvm prog_topo_dev_nvm; struct ice_aqc_read_topo_dev_nvm read_topo_dev_nvm; + struct ice_aqc_get_set_tx_topo get_set_tx_topo; } params; }; - /* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */ #define ICE_AQ_LG_BUF 512 @@ -3896,6 +4104,10 @@ enum ice_adminq_opc { ice_aqc_opc_query_node_to_root = 0x0413, ice_aqc_opc_cfg_l2_node_cgd = 0x0414, ice_aqc_opc_remove_rl_profiles = 0x0415, + ice_aqc_opc_set_tx_topo = 0x0417, + ice_aqc_opc_get_tx_topo = 0x0418, + ice_aqc_opc_cfg_node_attr = 0x0419, + ice_aqc_opc_query_node_attr = 0x041A, /* PHY commands */ ice_aqc_opc_get_phy_caps = 0x0600, @@ -3905,6 +4117,8 @@ enum ice_adminq_opc { ice_aqc_opc_get_link_status = 0x0607, ice_aqc_opc_set_event_mask = 0x0613, ice_aqc_opc_set_mac_lb = 0x0620, + ice_aqc_opc_set_phy_rec_clk_out = 0x0630, + ice_aqc_opc_get_phy_rec_clk_out = 0x0631, ice_aqc_opc_dnl_get_status = 0x0680, ice_aqc_opc_dnl_run = 0x0681, ice_aqc_opc_dnl_call = 0x0682, @@ -3964,6 +4178,7 @@ enum ice_adminq_opc { ice_aqc_opc_lldp_set_local_mib = 0x0A08, ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09, ice_aqc_opc_lldp_filter_ctrl = 0x0A0A, + ice_execute_pending_lldp_mib = 0x0A0B, /* RSS commands */ ice_aqc_opc_set_rss_key = 0x0B02, @@ -4009,11 +4224,30 @@ enum ice_adminq_opc { ice_aqc_opc_update_pkg = 0x0C42, ice_aqc_opc_get_pkg_info_list = 0x0C43, + /* 1588/SyncE commands/events */ + ice_aqc_opc_cfg_cgu_err = 0x0C60, + ice_aqc_opc_event_cgu_err = 0x0C60, + ice_aqc_opc_get_cgu_abilities = 0x0C61, + ice_aqc_opc_set_cgu_input_config = 0x0C62, + ice_aqc_opc_get_cgu_input_config = 0x0C63, + ice_aqc_opc_set_cgu_output_config = 0x0C64, + ice_aqc_opc_get_cgu_output_config = 0x0C65, + ice_aqc_opc_get_cgu_dpll_status = 0x0C66, + ice_aqc_opc_set_cgu_dpll_config = 0x0C67, + ice_aqc_opc_set_cgu_ref_prio = 0x0C68, + ice_aqc_opc_get_cgu_ref_prio = 0x0C69, + ice_aqc_opc_get_cgu_info = 0x0C6A, + ice_aqc_opc_read_cgu_reg = 0x0C6E, + ice_aqc_opc_write_cgu_reg = 0x0C6F, ice_aqc_opc_driver_shared_params = 0x0C90, /* Standalone Commands/Events */ ice_aqc_opc_event_lan_overflow = 0x1001, + + /* debug commands */ + ice_aqc_opc_debug_dump_internals = 0xFF08, + /* SystemDiagnostic commands */ ice_aqc_opc_set_health_status_config = 0xFF20, ice_aqc_opc_get_supported_health_status_codes = 0xFF21, diff --git a/drivers/thirdparty/ice/ice_arfs.c b/drivers/thirdparty/ice/ice_arfs.c index b059cd91a7e0..568225b6c2eb 100644 --- a/drivers/thirdparty/ice/ice_arfs.c +++ b/drivers/thirdparty/ice/ice_arfs.c @@ -2,6 +2,7 @@ /* Copyright (C) 2018-2021, Intel Corporation. 
*/ #include "ice.h" +#include "ice_irq.h" /** * ice_is_arfs_active - helper to check is aRFS is active @@ -553,7 +554,7 @@ void ice_init_arfs(struct ice_vsi *vsi) if (!vsi || vsi->type != ICE_VSI_PF) return; - arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST, + arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), GFP_KERNEL); if (!arfs_fltr_list) return; @@ -610,14 +611,14 @@ void ice_clear_arfs(struct ice_vsi *vsi) } /** - * ice_free_cpu_rx_rmap - free setup cpu reverse map + * ice_free_cpu_rx_rmap - free setup CPU reverse map * @vsi: the VSI to be forwarded to */ -static void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) +void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { struct net_device *netdev; - if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list) + if (!vsi || vsi->type != ICE_VSI_PF) return; netdev = vsi->netdev; @@ -629,7 +630,7 @@ static void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) } /** - * ice_set_cpu_rx_rmap - setup cpu reverse map for each queue + * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue * @vsi: the VSI to be forwarded to */ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) @@ -639,7 +640,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) int base_idx, i; if (!vsi || vsi->type != ICE_VSI_PF) - return -EINVAL; + return 0; pf = vsi->back; netdev = vsi->netdev; @@ -656,7 +657,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) base_idx = vsi->base_vector; for (i = 0; i < vsi->num_q_vectors; i++) if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, - pf->msix_entries[base_idx + i].vector)) { + ice_get_irq_num(pf, base_idx + i))) { ice_free_cpu_rx_rmap(vsi); return -EINVAL; } @@ -676,7 +677,6 @@ void ice_remove_arfs(struct ice_pf *pf) if (!pf_vsi) return; - ice_free_cpu_rx_rmap(pf_vsi); ice_clear_arfs(pf_vsi); } @@ -693,9 +693,5 @@ void ice_rebuild_arfs(struct ice_pf *pf) return; ice_remove_arfs(pf); - if (ice_set_cpu_rx_rmap(pf_vsi)) { - dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n"); - return; - } ice_init_arfs(pf_vsi); } diff --git a/drivers/thirdparty/ice/ice_arfs.h b/drivers/thirdparty/ice/ice_arfs.h index 7b9346fa73c8..bfd5a489e9eb 100644 --- a/drivers/thirdparty/ice/ice_arfs.h +++ b/drivers/thirdparty/ice/ice_arfs.h @@ -4,7 +4,7 @@ #ifndef _ICE_ARFS_H_ #define _ICE_ARFS_H_ -#include "ice.h" +#include "ice_fdir.h" enum ice_arfs_fltr_state { ICE_ARFS_INACTIVE, @@ -45,6 +45,7 @@ int ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, u16 rxq_idx, u32 flow_id); void ice_clear_arfs(struct ice_vsi *vsi); +void ice_free_cpu_rx_rmap(struct ice_vsi *vsi); void ice_init_arfs(struct ice_vsi *vsi); void ice_sync_arfs_fltrs(struct ice_pf *pf); int ice_set_cpu_rx_rmap(struct ice_vsi *vsi); @@ -55,6 +56,7 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type); #else static inline void ice_clear_arfs(struct ice_vsi *vsi) { } +static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { } static inline void ice_init_arfs(struct ice_vsi *vsi) { } static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { } static inline void ice_remove_arfs(struct ice_pf *pf) { } diff --git a/drivers/thirdparty/ice/ice_base.c b/drivers/thirdparty/ice/ice_base.c index e2acab0c0777..f3cd8dc229d6 100644 --- a/drivers/thirdparty/ice/ice_base.c +++ b/drivers/thirdparty/ice/ice_base.c @@ -4,7 +4,7 @@ #include "ice_base.h" #include "ice_lib.h" #include "ice_dcb_lib.h" -#include "ice_virtchnl_pf.h" +#include "ice_sriov.h" /** * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI @@ -322,9 +322,10 @@ 
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) break; case ICE_VSI_VF: /* Firmware expects vmvf_num to be absolute VF ID */ - tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; + tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id; tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; break; + case ICE_VSI_ADI: case ICE_VSI_OFFLOAD_MACVLAN: case ICE_VSI_VMDQ2: case ICE_VSI_SWITCHDEV_CTRL: @@ -349,6 +350,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) break; } + tlan_ctx->quanta_prof_idx = ring->quanta_prof_id; tlan_ctx->tso_ena = ICE_TX_LEGACY; tlan_ctx->tso_qnum = pf_q; @@ -402,7 +404,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring) /* Strip the Ethernet CRC bytes before the packet is posted to host * memory. */ - rlan_ctx.crcstrip = ring->rx_crc_strip_dis ? 0 : 1; + rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS); /* L2TSEL flag defines the reported L2 Tags in the receive descriptor * and it needs to remain 1 for non-DVM capable configurations to not @@ -412,14 +414,25 @@ static int ice_setup_rx_ctx(struct ice_ring *ring) * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will * check for the tag */ - if (ice_is_dvm_ena(hw)) - if (vsi->type == ICE_VSI_VF && - ice_vf_is_port_vlan_ena(&vsi->back->vf[vsi->vf_id])) - rlan_ctx.l2tsel = 1; - else + if (ice_is_dvm_ena(hw)) { + if (vsi->type == ICE_VSI_VF) { + struct ice_vf *vf = vsi->vf; + + if (vf && ice_vf_is_port_vlan_ena(vf)) { + rlan_ctx.l2tsel = 1; + } else if (!vf) { + WARN(1, "VF VSI %u has NULL VF pointer", + vsi->vsi_num); + rlan_ctx.l2tsel = 0; + } else { + rlan_ctx.l2tsel = 0; + } + } else { rlan_ctx.l2tsel = 0; - else + } + } else { rlan_ctx.l2tsel = 1; + } rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; @@ -454,22 +467,25 @@ static int ice_setup_rx_ctx(struct ice_ring *ring) * setting to 0x03 to ensure profile is programming if prev context is * of same priority */ - if (vsi->type != ICE_VSI_VF) - ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true); - else + switch (vsi->type) { + case ICE_VSI_ADI: + case ICE_VSI_VF: ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3, false); + break; + default: + ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true); + } /* Absolute queue number out of 2K needs to be passed */ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); if (err) { - dev_err(ice_pf_to_dev(vsi->back), - "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", + dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", pf_q, err); return -EIO; } - if (vsi->type == ICE_VSI_VF) + if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_ADI) return 0; /* configure Rx buffer alignment */ @@ -499,6 +515,7 @@ int ice_vsi_cfg_rxq(struct ice_ring *ring) ring->rx_buf_len = ring->vsi->rx_buf_len; +#ifdef HAVE_XDP_BUFF_RXQ #ifdef HAVE_AF_XDP_ZC_SUPPORT if (ring->vsi->type == ICE_VSI_PF) { if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) @@ -506,7 +523,7 @@ int ice_vsi_cfg_rxq(struct ice_ring *ring) xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index, ring->q_vector->napi.napi_id); - ring->xsk_pool = ice_xsk_umem(ring); + ring->xsk_pool = ice_xsk_pool(ring); if (ring->xsk_pool) { xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); @@ -563,11 +580,13 @@ int ice_vsi_cfg_rxq(struct ice_ring *ring) return err; } #endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif /* HAVE_XDP_BUFF_RXQ */ err = ice_setup_rx_ctx(ring); if (err) { 
- dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n", - ring->q_index, err); + ice_dev_err_errno(dev, err, + "ice_setup_rx_ctx failed for RxQ %d", + ring->q_index); return err; } @@ -714,8 +733,9 @@ err_out: while (v_idx--) ice_free_q_vector(vsi, v_idx); - dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n", - vsi->num_q_vectors, vsi->vsi_num, err); + ice_dev_err_errno(dev, err, + "Failed to allocate %d q_vector for VSI %d", + vsi->num_q_vectors, vsi->vsi_num); vsi->num_q_vectors = 0; return err; } @@ -754,9 +774,14 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { struct ice_ring *tx_ring = vsi->tx_rings[q_id]; - tx_ring->q_vector = q_vector; - tx_ring->next = q_vector->tx.ring; - q_vector->tx.ring = tx_ring; + if (tx_ring) { + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + } else { + dev_err(ice_pf_to_dev(vsi->back), "NULL Tx ring found\n"); + break; + } } tx_rings_rem -= tx_rings_per_v; @@ -771,9 +796,14 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { struct ice_ring *rx_ring = vsi->rx_rings[q_id]; - rx_ring->q_vector = q_vector; - rx_ring->next = q_vector->rx.ring; - q_vector->rx.ring = rx_ring; + if (rx_ring) { + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + } else { + dev_err(ice_pf_to_dev(vsi->back), "NULL Rx ring found\n"); + break; + } } rx_rings_rem -= rx_rings_per_v; } @@ -794,28 +824,28 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi) /** * ice_vsi_cfg_txq - Configure single Tx queue * @vsi: the VSI that queue belongs to - * @ring: Tx ring to be configured + * @tx_ring: Tx ring to be configured * @qg_buf: queue group buffer */ int -ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, +ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *tx_ring, struct ice_aqc_add_tx_qgrp *qg_buf) { u8 buf_len = struct_size(qg_buf, txqs, 1); struct ice_tlan_ctx tlan_ctx = { 0 }; + struct ice_channel *ch = tx_ring->ch; struct ice_aqc_add_txqs_perq *txq; - struct ice_channel *ch = ring->ch; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; + int status; u16 pf_q; u8 tc; /* Configure XPS */ - ice_cfg_xps_tx_ring(ring); + ice_cfg_xps_tx_ring(tx_ring); - pf_q = ring->reg_idx; - ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); + pf_q = tx_ring->reg_idx; + ice_setup_tx_ctx(tx_ring, &tlan_ctx, pf_q); /* copy context contents into the qg_buf */ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, @@ -824,10 +854,9 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, /* init queue specific tail reg. It is referred as * transmit comm scheduler queue doorbell. */ - ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q); - + tx_ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q); if (IS_ENABLED(CONFIG_DCB)) - tc = ring->dcb_tc; + tc = tx_ring->dcb_tc; else tc = 0; @@ -835,19 +864,22 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, * TC into the VSI Tx ring */ if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) - ring->q_handle = ice_eswitch_calc_q_handle(ring); + tx_ring->q_handle = ice_eswitch_calc_q_handle(tx_ring); else - ring->q_handle = ice_calc_q_handle(vsi, ring, tc); + tx_ring->q_handle = ice_calc_q_handle(vsi, tx_ring, tc); - status = (ch ? 
- ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0, - ring->q_handle, 1, qg_buf, buf_len, NULL) : - ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, - ring->q_handle, 1, qg_buf, buf_len, NULL)); + if (ch) + status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0, + tx_ring->q_handle, 1, qg_buf, buf_len, + NULL); + else + status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, + tx_ring->q_handle, 1, qg_buf, buf_len, + NULL); if (status) { - dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n", - ice_stat_str(status)); - return -ENODEV; + dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n", + status); + return status; } /* Add Tx Queue TEID into the VSI Tx ring from the @@ -856,7 +888,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, */ txq = &qg_buf->txqs[0]; if (pf_q == le16_to_cpu(txq->txq_id)) - ring->txq_teid = le32_to_cpu(txq->q_teid); + tx_ring->txq_teid = le32_to_cpu(txq->q_teid); return 0; } @@ -971,7 +1003,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, struct ice_pf *pf = vsi->back; struct ice_q_vector *q_vector; struct ice_hw *hw = &pf->hw; - enum ice_status status; + int status; u32 val; /* clear cause_ena bit for disabled queues */ @@ -994,19 +1026,18 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, &txq_meta->q_id, &txq_meta->q_teid, rst_src, rel_vmvf_num, NULL); - /* if the disable queue command was exercised during an - * active reset flow, ICE_ERR_RESET_ONGOING is returned. - * This is not an error as the reset operation disables - * queues at the hardware level anyway. + /* If the disable queue command was exercised during an active reset + * flow, -EBUSY is returned. This is not an error as the reset + * operation disables queues at the hardware level anyway. */ - if (status == ICE_ERR_RESET_ONGOING) { + if (status == -EBUSY) { dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n"); - } else if (status == ICE_ERR_DOES_NOT_EXIST) { + } else if (status == -ENOENT) { dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n"); } else if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n", - ice_stat_str(status)); - return -ENODEV; + dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n", + status); + return status; } return 0; diff --git a/drivers/thirdparty/ice/ice_base.h b/drivers/thirdparty/ice/ice_base.h index 5c83e555ef5c..c9f72d3a6c2c 100644 --- a/drivers/thirdparty/ice/ice_base.h +++ b/drivers/thirdparty/ice/ice_base.h @@ -15,7 +15,7 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi); void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi); void ice_vsi_free_q_vectors(struct ice_vsi *vsi); int -ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, +ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *tx_ring, struct ice_aqc_add_tx_qgrp *qg_buf); void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector); void diff --git a/drivers/thirdparty/ice/ice_bst_tcam.c b/drivers/thirdparty/ice/ice_bst_tcam.c new file mode 100644 index 000000000000..6f39573e7263 --- /dev/null +++ b/drivers/thirdparty/ice/ice_bst_tcam.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#include "ice_common.h" +#include "ice_parser_util.h" + +#define ICE_BST_TCAM_TABLE_SIZE 256 + +static void _bst_np_kb_dump(struct ice_hw *hw, struct ice_np_keybuilder *kb) +{ + dev_info(ice_hw_to_dev(hw), "next proto key builder:\n"); + dev_info(ice_hw_to_dev(hw), "\tops = %d\n", kb->ops); + dev_info(ice_hw_to_dev(hw), "\tstart_or_reg0 = %d\n", + kb->start_or_reg0); + dev_info(ice_hw_to_dev(hw), "\tlen_or_reg1 = %d\n", kb->len_or_reg1); +} + +static void _bst_pg_kb_dump(struct ice_hw *hw, struct ice_pg_keybuilder *kb) +{ + dev_info(ice_hw_to_dev(hw), "parse graph key builder:\n"); + dev_info(ice_hw_to_dev(hw), "\tflag0_ena = %d\n", kb->flag0_ena); + dev_info(ice_hw_to_dev(hw), "\tflag1_ena = %d\n", kb->flag1_ena); + dev_info(ice_hw_to_dev(hw), "\tflag2_ena = %d\n", kb->flag2_ena); + dev_info(ice_hw_to_dev(hw), "\tflag3_ena = %d\n", kb->flag3_ena); + dev_info(ice_hw_to_dev(hw), "\tflag0_idx = %d\n", kb->flag0_idx); + dev_info(ice_hw_to_dev(hw), "\tflag1_idx = %d\n", kb->flag1_idx); + dev_info(ice_hw_to_dev(hw), "\tflag2_idx = %d\n", kb->flag2_idx); + dev_info(ice_hw_to_dev(hw), "\tflag3_idx = %d\n", kb->flag3_idx); + dev_info(ice_hw_to_dev(hw), "\talu_reg_idx = %d\n", kb->alu_reg_idx); +} + +static void _bst_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int index) +{ + dev_info(ice_hw_to_dev(hw), "alu%d:\n", index); + dev_info(ice_hw_to_dev(hw), "\topc = %d\n", alu->opc); + dev_info(ice_hw_to_dev(hw), "\tsrc_start = %d\n", alu->src_start); + dev_info(ice_hw_to_dev(hw), "\tsrc_len = %d\n", alu->src_len); + dev_info(ice_hw_to_dev(hw), "\tshift_xlate_select = %d\n", + alu->shift_xlate_select); + dev_info(ice_hw_to_dev(hw), "\tshift_xlate_key = %d\n", + alu->shift_xlate_key); + dev_info(ice_hw_to_dev(hw), "\tsrc_reg_id = %d\n", alu->src_reg_id); + dev_info(ice_hw_to_dev(hw), "\tdst_reg_id = %d\n", alu->dst_reg_id); + dev_info(ice_hw_to_dev(hw), "\tinc0 = %d\n", alu->inc0); + dev_info(ice_hw_to_dev(hw), "\tinc1 = %d\n", alu->inc1); + dev_info(ice_hw_to_dev(hw), "\tproto_offset_opc = %d\n", + alu->proto_offset_opc); + dev_info(ice_hw_to_dev(hw), "\tproto_offset = %d\n", + alu->proto_offset); + dev_info(ice_hw_to_dev(hw), "\tbranch_addr = %d\n", alu->branch_addr); + dev_info(ice_hw_to_dev(hw), "\timm = %d\n", alu->imm); + dev_info(ice_hw_to_dev(hw), "\tdst_start = %d\n", alu->dst_start); + dev_info(ice_hw_to_dev(hw), "\tdst_len = %d\n", alu->dst_len); + dev_info(ice_hw_to_dev(hw), "\tflags_extr_imm = %d\n", + alu->flags_extr_imm); + dev_info(ice_hw_to_dev(hw), "\tflags_start_imm= %d\n", + alu->flags_start_imm); +} + +/** + * ice_bst_tcam_dump - dump a boost tcam info + * @hw: pointer to the hardware structure + * @item: boost tcam to dump + */ +void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item) +{ + int i; + + dev_info(ice_hw_to_dev(hw), "address = %d\n", item->address); + dev_info(ice_hw_to_dev(hw), "key :"); + for (i = 0; i < 20; i++) + dev_info(ice_hw_to_dev(hw), "%02x ", item->key[i]); + dev_info(ice_hw_to_dev(hw), "\n"); + dev_info(ice_hw_to_dev(hw), "key_inv:"); + for (i = 0; i < 20; i++) + dev_info(ice_hw_to_dev(hw), "%02x ", item->key_inv[i]); + dev_info(ice_hw_to_dev(hw), "\n"); + dev_info(ice_hw_to_dev(hw), "hit_idx_grp = %d\n", item->hit_idx_grp); + dev_info(ice_hw_to_dev(hw), "pg_pri = %d\n", item->pg_pri); + _bst_np_kb_dump(hw, &item->np_kb); + _bst_pg_kb_dump(hw, &item->pg_kb); + _bst_alu_dump(hw, &item->alu0, 0); + _bst_alu_dump(hw, &item->alu1, 1); + _bst_alu_dump(hw, &item->alu2, 2); +} + +/** The function parses a 96 bits ALU entry with below 
format: + * BIT 0-5: Opcode (alu->opc) + * BIT 6-13: Source Start (alu->src_start) + * BIT 14-18: Source Length (alu->src_len) + * BIT 19: Shift/Xlate Select (alu->shift_xlate_select) + * BIT 20-23: Shift/Xlate Key (alu->shift_xlate_key) + * BIT 24-30: Source Register ID (alu->src_reg_id) + * BIT 31-37: Dest. Register ID (alu->dst_reg_id) + * BIT 38: Inc0 (alu->inc0) + * BIT 39: Inc1:(alu->inc1) + * BIT 40:41 Protocol Offset Opcode (alu->proto_offset_opc) + * BIT 42:49 Protocol Offset (alu->proto_offset) + * BIT 50:57 Branch Address (alu->branch_addr) + * BIT 58:73 Immediate (alu->imm) + * BIT 74 Dedicated Flags Enable (alu->dedicate_flags_ena) + * BIT 75:80 Dest. Start (alu->dst_start) + * BIT 81:86 Dest. Length (alu->dst_len) + * BIT 87 Flags Extract Imm. (alu->flags_extr_imm) + * BIT 88:95 Flags Start/Immediate (alu->flags_start_imm) + * + * NOTE: the first 7 bits are skipped as the start bit is not + * byte aligned. + */ +static void _bst_alu_init(struct ice_alu *alu, u8 *data) +{ + u64 d64 = *(u64 *)data >> 7; + + alu->opc = (enum ice_alu_opcode)(d64 & 0x3f); + alu->src_start = (u8)((d64 >> 6) & 0xff); + alu->src_len = (u8)((d64 >> 14) & 0x1f); + alu->shift_xlate_select = ((d64 >> 19) & 0x1) != 0; + alu->shift_xlate_key = (u8)((d64 >> 20) & 0xf); + alu->src_reg_id = (u8)((d64 >> 24) & 0x7f); + alu->dst_reg_id = (u8)((d64 >> 31) & 0x7f); + alu->inc0 = ((d64 >> 38) & 0x1) != 0; + alu->inc1 = ((d64 >> 39) & 0x1) != 0; + alu->proto_offset_opc = (u8)((d64 >> 40) & 0x3); + alu->proto_offset = (u8)((d64 >> 42) & 0xff); + + d64 = *(u64 *)(&data[6]) >> 9; + + alu->branch_addr = (u8)(d64 & 0xff); + alu->imm = (u16)((d64 >> 8) & 0xffff); + alu->dedicate_flags_ena = ((d64 >> 24) & 0x1) != 0; + alu->dst_start = (u8)((d64 >> 25) & 0x3f); + alu->dst_len = (u8)((d64 >> 31) & 0x3f); + alu->flags_extr_imm = ((d64 >> 37) & 0x1) != 0; + alu->flags_start_imm = (u8)((d64 >> 38) & 0xff); +} + +/** The function parses a 35 bits Parse Graph Key Build with below format: + * BIT 0: Flag 0 Enable (kb->flag0_ena) + * BIT 1-6: Flag 0 Index (kb->flag0_idx) + * BIT 7: Flag 1 Enable (kb->flag1_ena) + * BIT 8-13: Flag 1 Index (kb->flag1_idx) + * BIT 14: Flag 2 Enable (kb->flag2_ena) + * BIT 15-20: Flag 2 Index (kb->flag2_idx) + * BIT 21: Flag 3 Enable (kb->flag3_ena) + * BIT 22-27: Flag 3 Index (kb->flag3_idx) + * BIT 28-34: ALU Register Index (kb->alu_reg_idx) + */ +static void _bst_pgkb_init(struct ice_pg_keybuilder *kb, u64 data) +{ + kb->flag0_ena = (data & 0x1) != 0; + kb->flag0_idx = (u8)((data >> 1) & 0x3f); + kb->flag1_ena = ((data >> 7) & 0x1) != 0; + kb->flag1_idx = (u8)((data >> 8) & 0x3f); + kb->flag2_ena = ((data >> 14) & 0x1) != 0; + kb->flag2_idx = (u8)((data >> 15) & 0x3f); + kb->flag3_ena = ((data >> 21) & 0x1) != 0; + kb->flag3_idx = (u8)((data >> 22) & 0x3f); + kb->alu_reg_idx = (u8)((data >> 28) & 0x7f); +} + +/** The function parses a 18 bits Next Protocol Key Build with below format: + * BIT 0-1: Opcode kb->ops + * BIT 2-9: Start / Reg 0 (kb->start_or_reg0) + * BIT 10-17: Length / Reg 1 (kb->len_or_reg1) + */ +static void _bst_npkb_init(struct ice_np_keybuilder *kb, u32 data) +{ + kb->ops = (u8)(data & 0x3); + kb->start_or_reg0 = (u8)((data >> 2) & 0xff); + kb->len_or_reg1 = (u8)((data >> 10) & 0xff); +} + +/** The function parses a 704 bits Boost TCAM entry with below format: + * BIT 0-15: Address (ti->address) + * BIT 16-31: reserved + * BIT 32-191: Key (ti->key) + * BIT 192-351:Key Invert (ti->key_inv) + * BIT 352-359:Boost Hit Index Group (ti->hit_idx_grp) + * BIT 360-361:PG Priority 
(ti->pg_pri) + * BIT 362-379:Next Proto Key Build (ti->np_kb) + * BIT 380-414:PG Key Build (ti->pg_kb) + * BIT 415-510:ALU 0 (ti->alu0) + * BIT 511-606:ALU 1 (ti->alu1) + * BIT 607-702:ALU 2 (ti->alu2) + * BIT 703: reserved + */ +static void _bst_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + struct ice_bst_tcam_item *ti = (struct ice_bst_tcam_item *)item; + u8 *buf = (u8 *)data; + int i; + + ti->address = *(u16 *)buf; + + for (i = 0; i < 20; i++) + ti->key[i] = buf[4 + i]; + for (i = 0; i < 20; i++) + ti->key_inv[i] = buf[24 + i]; + ti->hit_idx_grp = buf[44]; + ti->pg_pri = buf[45] & 0x3; + _bst_npkb_init(&ti->np_kb, *(u32 *)&buf[45] >> 2); + _bst_pgkb_init(&ti->pg_kb, *(u64 *)&buf[47] >> 4); + _bst_alu_init(&ti->alu0, &buf[51]); + _bst_alu_init(&ti->alu1, &buf[63]); + _bst_alu_init(&ti->alu2, &buf[75]); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_bst_tcam_dump(hw, ti); +} + +/** + * ice_bst_tcam_table_get - create a boost tcam table + * @hw: pointer to the hardware structure + */ +struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw) +{ + return (struct ice_bst_tcam_item *) + ice_parser_create_table(hw, ICE_SID_RXPARSER_BOOST_TCAM, + sizeof(struct ice_bst_tcam_item), + ICE_BST_TCAM_TABLE_SIZE, + ice_parser_sect_item_get, + _bst_parse_item, true); +} + +static void _parse_lbl_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + ice_parse_item_dflt(hw, idx, item, data, size); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_lbl_dump(hw, (struct ice_lbl_item *)item); +} + +/** + * ice_bst_lbl_table_get - create a boost label table + * @hw: pointer to the hardware structure + */ +struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw) +{ + return (struct ice_lbl_item *) + ice_parser_create_table(hw, ICE_SID_LBL_RXPARSER_TMEM, + sizeof(struct ice_lbl_item), + ICE_BST_TCAM_TABLE_SIZE, + ice_parser_sect_item_get, + _parse_lbl_item, true); +} + +/** + * ice_bst_tcam_match - match a pattern on the boost tcam table + * @tcam_table: boost tcam table to search + * @pat: pattern to match + */ +struct ice_bst_tcam_item * +ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat) +{ + int i; + + for (i = 0; i < ICE_BST_TCAM_TABLE_SIZE; i++) { + struct ice_bst_tcam_item *item = &tcam_table[i]; + + if (item->hit_idx_grp == 0) + continue; + if (ice_ternary_match(item->key, item->key_inv, pat, 20)) + return item; + } + + return NULL; +} + +static bool _start_with(const char *prefix, const char *string) +{ + int len1 = strlen(prefix); + int len2 = strlen(string); + + if (len2 < len1) + return false; + + return !memcmp(prefix, string, len1); +} + +struct ice_bst_tcam_item * +ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table, + struct ice_lbl_item *lbl_table, + const char *prefix, u16 *start) +{ + u16 i = *start; + + for (; i < ICE_BST_TCAM_TABLE_SIZE; i++) { + if (_start_with(prefix, lbl_table[i].label)) { + *start = i; + return &tcam_table[lbl_table[i].idx]; + } + } + + return NULL; +} diff --git a/drivers/thirdparty/ice/ice_bst_tcam.h b/drivers/thirdparty/ice/ice_bst_tcam.h new file mode 100644 index 000000000000..b9b5a2c1cddc --- /dev/null +++ b/drivers/thirdparty/ice/ice_bst_tcam.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _ICE_BST_TCAM_H_ +#define _ICE_BST_TCAM_H_ + +#include "ice_imem.h" + +struct ice_bst_tcam_item { + u16 address; + u8 key[20]; + u8 key_inv[20]; + u8 hit_idx_grp; + u8 pg_pri; + struct ice_np_keybuilder np_kb; + struct ice_pg_keybuilder pg_kb; + struct ice_alu alu0; + struct ice_alu alu1; + struct ice_alu alu2; +}; + +void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item); + +struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw); + +struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw); + +struct ice_bst_tcam_item * +ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat); +struct ice_bst_tcam_item * +ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table, + struct ice_lbl_item *lbl_table, + const char *prefix, u16 *start); +#endif /*_ICE_BST_TCAM_H_ */ diff --git a/drivers/thirdparty/ice/ice_cgu.h b/drivers/thirdparty/ice/ice_cgu.h deleted file mode 100644 index a9c215a9df92..000000000000 --- a/drivers/thirdparty/ice/ice_cgu.h +++ /dev/null @@ -1,231 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (C) 2018-2021, Intel Corporation. */ - -#ifndef _ICE_CGU_H_ -#define _ICE_CGU_H_ - -#include -#include "ice_cgu_regs.h" - -/* CGU mux identifier - * Specifies a mux within the CGU block. - */ -enum ice_cgu_mux_sel { - /* CGU reference clock source (DWORD10_SYNCE_S_REF_CLK) */ - ICE_CGU_MUX_SEL_REF_CLK, - /* CGU bypass clock source (DWORD11_SYNCE_S_BYP_CLK) */ - ICE_CGU_MUX_SEL_BYPASS_CLK, - /* CGU ETHCLKO pin source (DWORD10_SYNCE_ETHCLKO_SEL) */ - ICE_CGU_MUX_SEL_ETHCLKO, - /* CGU CLKO pin source (DWORD10_SYNCE_CLKO_SEL) */ - ICE_CGU_MUX_SEL_CLKO, - - NUM_ICE_CGU_MUX_SEL -}; - -/* CGU reference clock specification - * Specifies the source for the CGU reference/bypass clock. 
- */ -enum ice_cgu_clk_src { - /* network reference clock 0 */ - ICE_CGU_CLK_SRC_NET_REF_CLK0, - /* network reference clock 1 */ - ICE_CGU_CLK_SRC_NET_REF_CLK1, - /* 1588 recovered clock */ - ICE_CGU_CLK_SRC_1588_RECOVERED_CLK, - /* recovered clock from phys port 0 */ - ICE_CGU_CLK_SRC_SYNCE_CLK_0, - /* recovered clock from phys port 1 */ - ICE_CGU_CLK_SRC_SYNCE_CLK_1, - /* recovered clock from phys port 2 */ - ICE_CGU_CLK_SRC_SYNCE_CLK_2, - /* recovered clock from phys port 3 */ - ICE_CGU_CLK_SRC_SYNCE_CLK_3, - /* recovered clock from phys port 4 */ - ICE_CGU_CLK_SRC_SYNCE_CLK_4, - /* recovered clock from phys port 5 */ - ICE_CGU_CLK_SRC_SYNCE_CLK_5, - /* recovered clock from phys port 6 */ - ICE_CGU_CLK_SRC_SYNCE_CLK_6, - /* recovered clock from phys port 7 */ - ICE_CGU_CLK_SRC_SYNCE_CLK_7, - NUM_ICE_CGU_CLK_SRC -}; - -/* Sources for ETHCLKO pin */ -enum ice_cgu_ethclko_sel { - /* DPLL reference clock 0 input divided by ETHDIV */ - ICE_CGU_ETHCLKO_SEL_REF_CLK_BYP0_DIV, - /* DPLL reference clock 1 input divided by ETHDIV */ - ICE_CGU_ETHCLKO_SEL_REF_CLK_BYP1_DIV, - /* DPLL output clock divided by ETHDIV */ - ICE_CGU_ETHCLKO_SEL_CLK_PLL_25000_DIV, - /* JAPLL output clock divided by ETHDIV */ - ICE_CGU_ETHCLKO_SEL_CLK_JAPLL_625000_DIV, - /* DPLL reference clock 0 input */ - ICE_CGU_ETHCLKO_SEL_REF_CLK_BYP0, - /* DPLL reference clock 1 input */ - ICE_CGU_ETHCLKO_SEL_REF_CLK_BYP1, - /* DPLL output clock */ - ICE_CGU_ETHCLKO_SEL_CLK_PLL_25000, - ICE_CGU_ETHCLKO_SEL_CLK_JAPLL_625000, - - NUM_ICE_CGU_ETHCLKO_SEL -}; - -#define ICE_CGU_ETHCLKO_SEL_NRCKI ICE_CGU_ETHCLKO_SEL_REF_CLK_BYP1 - -/* Sources for CLKO pin */ -enum ice_cgu_clko_sel { - /* DPLL reference clock 0 input divided by CLKODIV */ - ICE_CGU_CLKO_SEL_REF_CLK_BYP0_DIV, - /* DPLL reference clock 1 input divided by CLKODIV */ - ICE_CGU_CLKO_SEL_REF_CLK_BYP1_DIV, - /* DPLL core clock divided by CLKODIV */ - ICE_CGU_CLKO_SEL_CLK_SYS_DIV, - /* JAPLL output clock divided by CLKODIV */ - ICE_CGU_CLKO_SEL_CLK_JAPLL_625000_DIV, - /* DPLL reference clock 0 input */ - ICE_CGU_CLKO_SEL_REF_CLK_BYP0, - /* DPLL reference clock 1 input */ - ICE_CGU_CLKO_SEL_REF_CLK_BYP1, - - /* 1.544 MHz, NRCP divider output */ - ICE_CGU_CLKO_SEL_CLK_1544 = 8, - /* 2.048 MHz, NRCP divider output */ - ICE_CGU_CLKO_SEL_CLK_2048 = 9, - - NUM_ICE_CGU_CLKO_SEL -}; - -#define ICE_CGU_CLKO_SEL_NRCKI ICE_CGU_CLKO_SEL_REF_CLK_BYP1 - -/* TIME_REF source selection */ -enum ice_cgu_time_ref_sel { - ICE_CGU_TIME_REF_SEL_TCXO, /* Use TCXO source */ - ICE_CGU_TIME_REF_SEL_TIME_REF, /* Use TIME_REF source */ - - NUM_ICE_CGU_TIME_REF_SEL -}; - -/* Macro to convert an enum ice_time_ref_freq to a string for printing */ -#define ICE_TIME_REF_FREQ_TO_STR(__trf) \ - ({ \ - enum ice_time_ref_freq _trf = (__trf); \ - (_trf) == ICE_TIME_REF_FREQ_25_000 ? "25 MHz" : \ - (_trf) == ICE_TIME_REF_FREQ_122_880 ? "122.88 MHz" : \ - (_trf) == ICE_TIME_REF_FREQ_125_000 ? "125 MHz" : \ - (_trf) == ICE_TIME_REF_FREQ_153_600 ? "153.6 MHz" : \ - (_trf) == ICE_TIME_REF_FREQ_156_250 ? "156.25 MHz" : \ - (_trf) == ICE_TIME_REF_FREQ_245_760 ? "245.76 MHz" : \ - "invalid"; \ - }) - -/* Macro to convert an enum ice_cgu_time_ref_sel to a string for printing */ -#define ICE_TIME_REF_SEL_TO_STR(__trs) \ - ({ \ - enum ice_cgu_time_ref_sel _trs = (__trs); \ - (_trs) == ICE_CGU_TIME_REF_SEL_TCXO ? "TCXO" : \ - (_trs) == ICE_CGU_TIME_REF_SEL_TIME_REF ? 
"TIME_REF" : \ - "invalid"; \ - }) -/* Macro to convert an enum ice_src_tmr_mode to a string for printing */ -#define ICE_SRC_TMR_MODE_TO_STR(__mtm) \ - ({ \ - enum ice_src_tmr_mode _mtm = (__mtm); \ - (_mtm) == ICE_SRC_TMR_MODE_NANOSECONDS ? "nanoseconds" : \ - (_mtm) == ICE_SRC_TMR_MODE_LOCKED ? "locked" : \ - "invalid"; \ - }) - -/* DPLL select */ -enum ice_cgu_dpll_select { - /* DPLL (DPLL1) */ - ICE_CGU_DPLL_SELECT_TRANSPORT, - /* EEC DPLL (DPLL2), 0x098 Hz BW */ - ICE_CGU_DPLL_SELECT_EEC_RELAXED_BW, - - NUM_ICE_CGU_DPLL_SELECT -}; - -/* DPLL holdover mode */ -enum ice_cgu_dpll_holdover_mode { - /* previous acquired frequency */ - ICE_CGU_DPLL_HOLDOVER_MODE_ACQUIRED_FREQ, - /* local frequency (free run) */ - ICE_CGU_DPLL_HOLDOVER_MODE_LOCAL_FREQ, - - NUM_ICE_CGU_DPLL_HOLDOVER_MODE -}; - -/* DPLL configuration parameters */ -struct ice_cgu_dpll_cfg { - /* CGU reference clock frequency */ - enum ice_time_ref_freq ref_freq; - /* select DPLL */ - enum ice_cgu_dpll_select dpll_sel; - /* enable holdover feature support */ - u32 holdover_support; - /* select holdover mode */ - enum ice_cgu_dpll_holdover_mode holdover_mode; -}; - -enum ice_japll_ref_freq { - ICE_CGU_JAPLL_REF_FREQ_25_000, /* 25 MHz */ - ICE_CGU_JAPLL_REF_FREQ_156_250, /* 156.25 MHz */ - - NUM_ICE_CGU_JAPLL_REF_FREQ -}; - -/* Mux configuration parameters */ -struct ice_cgu_mux_cfg { - /* reference clock source select */ - enum ice_cgu_clk_src ref_clk_src; - /* bypass clock source select */ - enum ice_cgu_clk_src byp_clk_src; - /* ETHCLKO pin source select */ - enum ice_cgu_ethclko_sel eth_clk_out; - /* CLKO pin source select */ - enum ice_cgu_clko_sel clk_out; - /* CLKO programmable divider */ - __u8 clk_out_div; - /* ETHCLKO programmable divider */ - __u8 eth_clk_out_div; - /* bypass DPLL */ - u32 bypass; - /* tie refClk to ground (force holdover mode) */ - u32 ref_clk_gnd_ena; -}; - -/* CGU event was triggered by SyncE loss of lock */ -#define ICE_CGU_EVENT_ERR_SYNCE_LOCK_LOSS 0x1 - -/* CGU event was triggered by SyncE holdover change */ -#define ICE_CGU_EVENT_ERR_HOLDOVER_CHNG 0x2 - -/* CGU event was triggered by timestamp PLL loss of lock */ -#define ICE_CGU_EVENT_ERR_TIMESYNC_LOCK_LOSS 0x4 - - -struct ice_cgu_info { - struct ice_cgu_dpll_cfg dpll_cfg; - struct ice_cgu_mux_cfg mux_cfg; - enum ice_japll_ref_freq japll_ref_freq; - wait_queue_head_t wq_head; - - /* used to synchronize waiters (only one at a time) */ - struct mutex event_mutex; - - u32 event_occurred; - u8 err_type; - u8 unlock_event; - - /* current state of 1588 output to CGU */ - u8 out_1588_enabled; - enum ice_time_ref_freq out_1588_ref_freq; - - enum ice_time_ref_freq time_ref_freq; - enum ice_src_tmr_mode src_tmr_mode; -}; - -#endif /* _ICE_CGU_H_ */ diff --git a/drivers/thirdparty/ice/ice_cgu_ops.c b/drivers/thirdparty/ice/ice_cgu_ops.c deleted file mode 100644 index cb7a3ce8605b..000000000000 --- a/drivers/thirdparty/ice/ice_cgu_ops.c +++ /dev/null @@ -1,248 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2018-2021, Intel Corporation. 
*/ - -#include "ice.h" - -/** - * ice_cgu_cfg_ts_pll - Configure the TS PLL - * @pf: Board private structure - * @enable: True to enable TS PLL - * @time_ref_freq: primary timer frequency - * @time_ref_sel: Time source - * @src_tmr_mode: primary timer mode - */ -int ice_cgu_cfg_ts_pll(struct ice_pf *pf, bool enable, enum ice_time_ref_freq time_ref_freq, - enum ice_cgu_time_ref_sel time_ref_sel, enum ice_src_tmr_mode src_tmr_mode) -{ - struct ice_cgu_info *cgu_info = &pf->cgu_info; - union tspll_ro_bwm_lf bwm_lf; - union nac_cgu_dword19 dw19; - union nac_cgu_dword22 dw22; - union nac_cgu_dword24 dw24; - union nac_cgu_dword9 dw9; - int err; - - dev_dbg(ice_pf_to_dev(pf), "Requested %s, time_ref_freq %s, time_ref_sel %s, src_tmr_mode %s\n", - enable ? "enable" : "disable", - ICE_TIME_REF_FREQ_TO_STR(time_ref_freq), - ICE_TIME_REF_SEL_TO_STR(time_ref_sel), - ICE_SRC_TMR_MODE_TO_STR(src_tmr_mode)); - - if (time_ref_freq >= NUM_ICE_TIME_REF_FREQ) { - dev_err(ice_pf_to_dev(pf), "Invalid TIME_REF freq %u\n", time_ref_freq); - return -EIO; - } - - if (time_ref_sel >= NUM_ICE_CGU_TIME_REF_SEL) { - dev_err(ice_pf_to_dev(pf), "Invalid TIME_REF sel %u\n", time_ref_sel); - return -EIO; - } - - if (src_tmr_mode >= NUM_ICE_SRC_TMR_MODE) { - dev_err(ice_pf_to_dev(pf), "Invalid src_tmr_mode %u\n", src_tmr_mode); - return -EIO; - } - - if (time_ref_sel == ICE_CGU_TIME_REF_SEL_TCXO && - time_ref_freq != ICE_TIME_REF_FREQ_25_000) { - dev_err(ice_pf_to_dev(pf), - "TS PLL source specified as TCXO but specified frequency is not 25 MHz\n"); - return -EIO; - } - - err = ice_cgu_reg_read(pf, NAC_CGU_DWORD9, &dw9.val); - if (!err) - err = ice_cgu_reg_read(pf, NAC_CGU_DWORD24, &dw24.val); - if (!err) - err = ice_cgu_reg_read(pf, TSPLL_RO_BWM_LF, &bwm_lf.val); - if (err) - return err; - - dev_dbg(ice_pf_to_dev(pf), - "Before change, %s, time_ref_freq %s, time_ref_sel %s, PLL %s\n", - dw24.field.ts_pll_enable ? "enabled" : "disabled", - ICE_TIME_REF_FREQ_TO_STR(dw9.field.time_ref_freq_sel), - ICE_TIME_REF_SEL_TO_STR(dw24.field.time_ref_sel), - bwm_lf.field.plllock_true_lock_cri ? 
"locked" : "unlocked"); - - if (!enable) { - if (dw24.field.ts_pll_enable) { - dw24.field.ts_pll_enable = 0; - err = ice_cgu_reg_write(pf, NAC_CGU_DWORD24, dw24.val); - if (!err) - ice_cgu_usleep(1); - } - /* don't need to update the freq, sel, or mode; that'll happen - * when the PLL is re-enabled - */ - return err; - } - - /* TS PLL must be disabled before changing freq or src */ - if (dw24.field.ts_pll_enable && (dw9.field.time_ref_freq_sel != time_ref_freq || - dw24.field.time_ref_sel != time_ref_sel)) { - dev_err(ice_pf_to_dev(pf), - "Can't adjust time_ref_freq or time_ref_sel while TS PLL is enabled\n"); - return -EIO; - } - - /* set frequency, configure TS PLL params, and enable the TS PLL */ - err = ice_cgu_reg_read(pf, NAC_CGU_DWORD19, &dw19.val); - if (!err) - err = ice_cgu_reg_read(pf, NAC_CGU_DWORD22, &dw22.val); - if (!err) { - dw9.field.time_ref_freq_sel = time_ref_freq; - dw19.field.tspll_fbdiv_intgr = tspll_per_rate_params[time_ref_freq].feedback_div; - dw19.field.tspll_ndivratio = 1; - dw22.field.time1588clk_div = tspll_per_rate_params[time_ref_freq].post_pll_div; - dw22.field.time1588clk_sel_div2 = 0; - dw24.field.ref1588_ck_div = tspll_per_rate_params[time_ref_freq].refclk_pre_div; - dw24.field.tspll_fbdiv_frac = tspll_per_rate_params[time_ref_freq].frac_n_div; - dw24.field.time_ref_sel = time_ref_sel; - err = ice_cgu_reg_write(pf, NAC_CGU_DWORD9, dw9.val); - } - if (!err) - err = ice_cgu_reg_write(pf, NAC_CGU_DWORD19, dw19.val); - if (!err) - err = ice_cgu_reg_write(pf, NAC_CGU_DWORD22, dw22.val); - /* first write dw24 with updated values but still not enabled */ - if (!err) - err = ice_cgu_reg_write(pf, NAC_CGU_DWORD24, dw24.val); - /* now enable the TS_PLL */ - if (!err) { - dw24.field.ts_pll_enable = 1; - err = ice_cgu_reg_write(pf, NAC_CGU_DWORD24, dw24.val); - } - - if (!err) { - cgu_info->time_ref_freq = time_ref_freq; - cgu_info->src_tmr_mode = src_tmr_mode; - err = ice_ptp_update_incval(pf, time_ref_freq, src_tmr_mode); - if (err) { - dev_err(ice_pf_to_dev(pf), "Failed to update INCVAL\n"); - return err; - } - } - - /* to check for lock, wait 1 ms; if it hasn't locked by then, it's not - * going to lock - */ - if (!err) { - ice_cgu_usleep(1000); - err = ice_cgu_reg_read(pf, TSPLL_RO_BWM_LF, &bwm_lf.val); - } - if (!err && bwm_lf.field.plllock_true_lock_cri) { - dev_dbg(ice_pf_to_dev(pf), - "TS PLL successfully locked, time_ref_freq %s, time_ref_sel %s\n", - ICE_TIME_REF_FREQ_TO_STR(time_ref_freq), - ICE_TIME_REF_SEL_TO_STR(time_ref_sel)); - - /* update state to indicate no unlock event since last lock */ - cgu_info->unlock_event = false; - } else { - dev_err(ice_pf_to_dev(pf), "TS PLL failed to lock\n"); - err = -EFAULT; - } - - return err; -} - -/** - * ice_cgu_init_state - Initialize CGU HW - * @pf: Board private structure - * - * Read CGU registers, initialize internal state, and lock the timestamp PLL using the parameters - * read from the soft straps. 
- */ -void ice_cgu_init_state(struct ice_pf *pf) -{ - union tspll_cntr_bist_settings tspll_cntr_bist; - struct ice_cgu_info *cgu_info = &pf->cgu_info; - union nac_cgu_dword10 dw10; - union nac_cgu_dword11 dw11; - union nac_cgu_dword12 dw12; - union nac_cgu_dword14 dw14; - union nac_cgu_dword24 dw24; - union nac_cgu_dword9 dw9; - int err; - - init_waitqueue_head(&cgu_info->wq_head); - mutex_init(&cgu_info->event_mutex); - - err = ice_cgu_reg_read(pf, NAC_CGU_DWORD9, &dw9.val); - if (!err) - err = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val); - if (!err) - err = ice_cgu_reg_read(pf, NAC_CGU_DWORD11, &dw11.val); - if (!err) - err = ice_cgu_reg_read(pf, NAC_CGU_DWORD12, &dw12.val); - if (!err) - err = ice_cgu_reg_read(pf, NAC_CGU_DWORD14, &dw14.val); - if (!err) - err = ice_cgu_reg_read(pf, NAC_CGU_DWORD24, &dw24.val); - if (!err) - err = ice_cgu_reg_read(pf, TSPLL_CNTR_BIST_SETTINGS, &tspll_cntr_bist.val); - if (err) - goto err; - - /* Note that the TIME_SYNC, TIME_REF, and ONE_PPS_OUT pins are enabled - * through soft straps. - */ - /* Mux config */ - cgu_info->mux_cfg.ref_clk_src = dw10.field.synce_s_ref_clk; - cgu_info->mux_cfg.byp_clk_src = dw11.field.synce_s_byp_clk; - cgu_info->mux_cfg.eth_clk_out = dw10.field.synce_ethclko_sel; - cgu_info->mux_cfg.clk_out = dw10.field.synce_clko_sel; - cgu_info->mux_cfg.clk_out_div = dw10.field.synce_clkodiv_m1; - cgu_info->mux_cfg.eth_clk_out_div = dw10.field.synce_ethdiv_m1; - cgu_info->mux_cfg.bypass = dw12.field.synce_dpll_byp; - cgu_info->mux_cfg.ref_clk_gnd_ena = dw10.field.synce_sel_gnd; - - /* Timestamp PLL config */ - /* Disable sticky lock detection so lock status reported is accurate */ - tspll_cntr_bist.field.i_plllock_sel_0 = 0; - tspll_cntr_bist.field.i_plllock_sel_1 = 0; - err = ice_cgu_reg_write(pf, TSPLL_CNTR_BIST_SETTINGS, tspll_cntr_bist.val); - - /* Assume the 1588 output to CGU isn't configured; require the app to reconfigure it before - * using it - */ - if (!err) - cgu_info->out_1588_enabled = false; - - /* first, try to lock the timestamp PLL with the parameters from the soft straps */ - /* disable first, then re-enable with correct parameters */ - err = ice_cgu_cfg_ts_pll(pf, false, dw9.field.time_ref_freq_sel, dw24.field.time_ref_sel, - ICE_SRC_TMR_MODE_NANOSECONDS); - if (err) - dev_err(ice_pf_to_dev(pf), "Failed to disable TS PLL\n"); - else - err = ice_cgu_cfg_ts_pll(pf, true, dw9.field.time_ref_freq_sel, - dw24.field.time_ref_sel, ICE_SRC_TMR_MODE_NANOSECONDS); - if (err) { - /* if that fails, try to lock the timestamp PLL with the TCXO - */ - dev_info(ice_pf_to_dev(pf), - "Unable to lock TS PLL with soft straps settings; trying TCXO\n"); - - /* disable first, then re-enable with correct parameters */ - err = ice_cgu_cfg_ts_pll(pf, false, ICE_TIME_REF_FREQ_25_000, - ICE_CGU_TIME_REF_SEL_TCXO, - ICE_SRC_TMR_MODE_NANOSECONDS); - if (err) - dev_err(ice_pf_to_dev(pf), "Failed to disable TS PLL with TCXO\n"); - else - err = ice_cgu_cfg_ts_pll(pf, true, ICE_TIME_REF_FREQ_25_000, - ICE_CGU_TIME_REF_SEL_TCXO, - ICE_SRC_TMR_MODE_NANOSECONDS); - if (err) { - dev_err(ice_pf_to_dev(pf), "Failed to lock TS PLL with TCXO\n"); - goto err; - } - } - - dev_info(ice_pf_to_dev(pf), "CGU init successful\n"); - return; -err: - dev_err(ice_pf_to_dev(pf), "CGU init failed, err=%d\n", err); -} diff --git a/drivers/thirdparty/ice/ice_cgu_ops.h b/drivers/thirdparty/ice/ice_cgu_ops.h deleted file mode 100644 index 9ba1ad7d939f..000000000000 --- a/drivers/thirdparty/ice/ice_cgu_ops.h +++ /dev/null @@ -1,121 +0,0 @@ -/* SPDX-License-Identifier: 
GPL-2.0 */ -/* Copyright (C) 2018-2021, Intel Corporation. */ - -#ifndef _ICE_CGU_OPS_H_ -#define _ICE_CGU_OPS_H_ - -#define ICE_CGU_LOCK_CHECK_DELAY_USEC 256000 /* 256 msec */ - -/* fast mode lock check settings */ -#define ICE_CGU_EDPLL_FAST_LOCK_DELAY_LOOPS 239 /* 60 seconds total */ -#define ICE_CGU_TDPLL_FAST_LOCK_DELAY_LOOPS 25 /* 5 seconds total */ - -/* normal mode lock check settings */ -#define ICE_CGU_EDPLL_NORMAL_LOCK_DELAY_LOOPS 52 /* 12 seconds total */ -#define ICE_CGU_TDPLL_NORMAL_LOCK_DELAY_LOOPS 13 /* 2 seconds total */ - -/* number of consecutive locks to declare DPLL lock */ -#define ICE_CGU_DPLL_LOCK_COUNT 5 - -#define ICE_CGU_CORE_CLOCK_MHZ 800 -#define ICE_CGU_DPLL_FREQ_MHZ 25 - -/* DPLL lock/unlock threshold */ -#define ICE_CGU_TRANSPORT_DPLL_LOCK_THRESHOLD_800MHZ 0x2D8 -#define ICE_CGU_TRANSPORT_DPLL_UNLOCK_THRESHOLD_800MHZ 0x3640 -#define ICE_CGU_ECC_DPLL_LOCK_THRESHOLD_800MHZ 0x5A -#define ICE_CGU_ECC_DPLL_UNLOCK_THRESHOLD_800MHZ 0x21E8 - -/* time to hold enable bits low to perform a JAPLL reset */ -#define ICE_CGU_JAPLL_RESET_TIME_USEC 1 - -/* LCPLL lock alone (FDPLL disabled) should take < 10 usec */ -#define ICE_CGU_LCPLL_LOCK_CHECK_DELAY_USEC 1 -#define ICE_CGU_LCPLL_LOCK_DELAY_LOOPS 10 - -/* FDPLL lock time in fast mode is around 500 msec; - * use poll interval of 100ms, max poll time 5 seconds - * (max poll time was originally 2 seconds, increased - * to 5 to avoid occasional poll timeouts.) - */ -#define ICE_CGU_FDPLL_LOCK_CHECK_DELAY_USEC 100000 -#define ICE_CGU_FDPLL_LOCK_DELAY_LOOPS 50 -#define ICE_CGU_FDPLL_ACQ_TOGGLE_LOOPS 2 - - -/* valid values for enum ice_cgu_clko_sel */ -#define ICE_CGU_CLKO_SEL_VALID_BITMAP \ - (BIT(ICE_CGU_CLKO_SEL_REF_CLK_BYP0_DIV) | \ - BIT(ICE_CGU_CLKO_SEL_REF_CLK_BYP1_DIV) | \ - BIT(ICE_CGU_CLKO_SEL_CLK_SYS_DIV) | \ - BIT(ICE_CGU_CLKO_SEL_CLK_JAPLL_625000_DIV) | \ - BIT(ICE_CGU_CLKO_SEL_REF_CLK_BYP0) | \ - BIT(ICE_CGU_CLKO_SEL_REF_CLK_BYP1) | \ - BIT(ICE_CGU_CLKO_SEL_CLK_1544) | \ - BIT(ICE_CGU_CLKO_SEL_CLK_2048)) - -/* Only FW can read NAC_CGU_DWORD8 where these are defined, so they are exposed - * to the driver stack via soft straps in the misc24 field of NAC_CGU_DWORD9. 
- */ -#define MISC24_BIT_TCXO_FREQ_SEL_M BIT(0) -#define MISC24_BIT_TCXO_SEL_M BIT(4) - -/* internal structure definitions */ - -enum ice_cgu_sample_rate { - ICE_CGU_SAMPLE_RATE_8K = 0, /* 8 KHz sample rate */ - ICE_CGU_SAMPLE_RATE_10K, /* 10 KHz sample rate */ - ICE_CGU_SAMPLE_RATE_12K5, /* 12.5 KHz sample rate */ - - NUM_ICE_CGU_SAMPLE_RATE -}; - -struct ice_cgu_div_rat_m1 { - u32 ref_clk_rate; /* reference clock rate in kHz */ - u32 div_rat_m1; /* div_rat_m1 value */ -}; - -struct ice_cgu_dpll_params { - enum ice_cgu_dpll_select dpll_select; - enum ice_cgu_sample_rate sample_rate; - u32 mul_rat_m1; - u32 scale; - u32 gain; -}; - -struct ice_cgu_dpll_per_rate_params { - u32 rate_hz; - enum ice_cgu_sample_rate sample_rate; - u32 div_rat_m1; - u32 synce_rat_sel; -}; - -struct ice_cgu_lcpll_per_rate_params { - u32 refclk_pre_div; - u32 feedback_div; - u32 frac_n_div; - u32 post_pll_div; -}; - -#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) -/* Function to init internal state */ -void ice_cgu_init_state(struct ice_pf *pf); -/* Function to configure TS PLL */ -int -ice_cgu_cfg_ts_pll(struct ice_pf *pf, bool enable, enum ice_time_ref_freq time_ref_freq, - enum ice_cgu_time_ref_sel time_ref_sel, - enum ice_src_tmr_mode src_tmr_mode); -#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ -static inline void ice_cgu_init_state(struct ice_pf *pf) { } -#if IS_ENABLED(CONFIG_DEBUG_FS) -static inline int -ice_cgu_cfg_ts_pll(struct ice_pf __always_unused *pf, bool __always_unused enable, - enum ice_time_ref_freq __always_unused time_ref_freq, - enum ice_cgu_time_ref_sel __always_unused time_ref_sel, - enum ice_src_tmr_mode __always_unused src_tmr_mode) -{ - return 0; -} -#endif /* IS_ENABLED(CONFIG_DEBUG_FS) */ -#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ -#endif /* _ICE_CGU_OPS_H_ */ diff --git a/drivers/thirdparty/ice/ice_cgu_regs.h b/drivers/thirdparty/ice/ice_cgu_regs.h index a58fc697e6f5..57abd52386d0 100644 --- a/drivers/thirdparty/ice/ice_cgu_regs.h +++ b/drivers/thirdparty/ice/ice_cgu_regs.h @@ -4,101 +4,7 @@ #ifndef _ICE_CGU_REGS_H_ #define _ICE_CGU_REGS_H_ -#include "ice_osdep.h" - -#define NAC_CGU_DWORD8 0x20 -#define NAC_CGU_DWORD8_TCXO_FREQ_SEL_S 0 -#define NAC_CGU_DWORD8_TCXO_FREQ_SEL_M BIT(0) -#define NAC_CGU_DWORD8_MISC8_S 1 -#define NAC_CGU_DWORD8_MISC8_M ICE_M(0x7, 1) -#define NAC_CGU_DWORD8_HLP_SWITCH_FREQ_SEL_S 4 -#define NAC_CGU_DWORD8_HLP_SWITCH_FREQ_SEL_M ICE_M(0xf, 4) -#define NAC_CGU_DWORD8_CGUPLL_NDIVRATIO_S 8 -#define NAC_CGU_DWORD8_CGUPLL_NDIVRATIO_M ICE_M(0xf, 8) -#define NAC_CGU_DWORD8_CGUPLL_IREF_NDIVRATIO_S 12 -#define NAC_CGU_DWORD8_CGUPLL_IREF_NDIVRATIO_M ICE_M(0x7, 12) -#define NAC_CGU_DWORD8_MISC28_S 15 -#define NAC_CGU_DWORD8_MISC28_M BIT(15) -#define NAC_CGU_DWORD8_HLPPLL_NDIVRATIO_S 16 -#define NAC_CGU_DWORD8_HLPPLL_NDIVRATIO_M ICE_M(0xf, 16) -#define NAC_CGU_DWORD8_HLPPLL_IREF_NDIVRATIO_S 20 -#define NAC_CGU_DWORD8_HLPPLL_IREF_NDIVRATIO_M ICE_M(0x7, 20) -#define NAC_CGU_DWORD8_MISC29_S 23 -#define NAC_CGU_DWORD8_MISC29_M BIT(23) -#define NAC_CGU_DWORD8_CLK_EREF1_EN_SELFBIAS_S 24 -#define NAC_CGU_DWORD8_CLK_EREF1_EN_SELFBIAS_M BIT(24) -#define NAC_CGU_DWORD8_CLK_EREF0_EN_SELFBIAS_S 25 -#define NAC_CGU_DWORD8_CLK_EREF0_EN_SELFBIAS_M BIT(25) -#define NAC_CGU_DWORD8_TIME_REF_EN_SELFBIAS_S 26 -#define NAC_CGU_DWORD8_TIME_REF_EN_SELFBIAS_M BIT(26) -#define NAC_CGU_DWORD8_TIME_SYNC_EN_SELFBIAS_S 27 -#define NAC_CGU_DWORD8_TIME_SYNC_EN_SELFBIAS_M BIT(27) -#define NAC_CGU_DWORD8_CLK_REF_SYNC_E_EN_SELFBIAS_S 28 -#define NAC_CGU_DWORD8_CLK_REF_SYNC_E_EN_SELFBIAS_M BIT(28) -#define 
NAC_CGU_DWORD8_NET_CLK_REF1_EN_SELFBIAS_S 29 -#define NAC_CGU_DWORD8_NET_CLK_REF1_EN_SELFBIAS_M BIT(29) -#define NAC_CGU_DWORD8_NET_CLK_REF0_EN_SELFBIAS_S 30 -#define NAC_CGU_DWORD8_NET_CLK_REF0_EN_SELFBIAS_M BIT(30) -#define NAC_CGU_DWORD8_TCXO_SEL_S 31 -#define NAC_CGU_DWORD8_TCXO_SEL_M BIT(31) - -union nac_cgu_dword8 { - struct { - u32 tcxo_freq_sel : 1; - u32 misc8 : 3; - u32 hlp_switch_freq_sel : 4; - u32 cgupll_ndivratio : 4; - u32 cgupll_iref_ndivratio : 3; - u32 misc28 : 1; - u32 hlppll_ndivratio : 4; - u32 hlppll_iref_ndivratio : 3; - u32 misc29 : 1; - u32 clk_eref1_en_selfbias : 1; - u32 clk_eref0_en_selfbias : 1; - u32 time_ref_en_selfbias : 1; - u32 time_sync_en_selfbias : 1; - u32 clk_ref_sync_e_en_selfbias : 1; - u32 net_clk_ref1_en_selfbias : 1; - u32 net_clk_ref0_en_selfbias : 1; - u32 tcxo_sel : 1; - } field; - u32 val; -}; - #define NAC_CGU_DWORD9 0x24 -#define NAC_CGU_DWORD9_TIME_REF_FREQ_SEL_S 0 -#define NAC_CGU_DWORD9_TIME_REF_FREQ_SEL_M ICE_M(0x7, 0) -#define NAC_CGU_DWORD9_CLK_EREF1_EN_S 3 -#define NAC_CGU_DWORD9_CLK_EREF1_EN_M BIT(3) -#define NAC_CGU_DWORD9_CLK_EREF0_EN_S 4 -#define NAC_CGU_DWORD9_CLK_EREF0_EN_M BIT(4) -#define NAC_CGU_DWORD9_TIME_REF_EN_S 5 -#define NAC_CGU_DWORD9_TIME_REF_EN_M BIT(5) -#define NAC_CGU_DWORD9_TIME_SYNC_EN_S 6 -#define NAC_CGU_DWORD9_TIME_SYNC_EN_M BIT(6) -#define NAC_CGU_DWORD9_ONE_PPS_OUT_EN_S 7 -#define NAC_CGU_DWORD9_ONE_PPS_OUT_EN_M BIT(7) -#define NAC_CGU_DWORD9_CLK_REF_SYNCE_EN_S 8 -#define NAC_CGU_DWORD9_CLK_REF_SYNCE_EN_M BIT(8) -#define NAC_CGU_DWORD9_CLK_SYNCE1_EN_S 9 -#define NAC_CGU_DWORD9_CLK_SYNCE1_EN_M BIT(9) -#define NAC_CGU_DWORD9_CLK_SYNCE0_EN_S 10 -#define NAC_CGU_DWORD9_CLK_SYNCE0_EN_M BIT(10) -#define NAC_CGU_DWORD9_NET_CLK_REF1_EN_S 11 -#define NAC_CGU_DWORD9_NET_CLK_REF1_EN_M BIT(11) -#define NAC_CGU_DWORD9_NET_CLK_REF0_EN_S 12 -#define NAC_CGU_DWORD9_NET_CLK_REF0_EN_M BIT(12) -#define NAC_CGU_DWORD9_CLK_SYNCE1_AMP_S 13 -#define NAC_CGU_DWORD9_CLK_SYNCE1_AMP_M ICE_M(0x3, 13) -#define NAC_CGU_DWORD9_MISC6_S 15 -#define NAC_CGU_DWORD9_MISC6_M BIT(15) -#define NAC_CGU_DWORD9_CLK_SYNCE0_AMP_S 16 -#define NAC_CGU_DWORD9_CLK_SYNCE0_AMP_M ICE_M(0x3, 16) -#define NAC_CGU_DWORD9_ONE_PPS_OUT_AMP_S 18 -#define NAC_CGU_DWORD9_ONE_PPS_OUT_AMP_M ICE_M(0x3, 18) -#define NAC_CGU_DWORD9_MISC24_S 20 -#define NAC_CGU_DWORD9_MISC24_M ICE_M(0xfff, 20) - union nac_cgu_dword9 { struct { u32 time_ref_freq_sel : 3; @@ -121,311 +27,7 @@ union nac_cgu_dword9 { u32 val; }; -#define NAC_CGU_DWORD10 0x28 -#define NAC_CGU_DWORD10_JA_PLL_ENABLE_S 0 -#define NAC_CGU_DWORD10_JA_PLL_ENABLE_M BIT(0) -#define NAC_CGU_DWORD10_MISC11_S 1 -#define NAC_CGU_DWORD10_MISC11_M BIT(1) -#define NAC_CGU_DWORD10_FDPLL_ENABLE_S 2 -#define NAC_CGU_DWORD10_FDPLL_ENABLE_M BIT(2) -#define NAC_CGU_DWORD10_FDPLL_SLOW_S 3 -#define NAC_CGU_DWORD10_FDPLL_SLOW_M BIT(3) -#define NAC_CGU_DWORD10_FDPLL_LOCK_INT_ENB_S 4 -#define NAC_CGU_DWORD10_FDPLL_LOCK_INT_ENB_M BIT(4) -#define NAC_CGU_DWORD10_SYNCE_CLKO_SEL_S 5 -#define NAC_CGU_DWORD10_SYNCE_CLKO_SEL_M ICE_M(0xf, 5) -#define NAC_CGU_DWORD10_SYNCE_CLKODIV_M1_S 9 -#define NAC_CGU_DWORD10_SYNCE_CLKODIV_M1_M ICE_M(0x1f, 9) -#define NAC_CGU_DWORD10_SYNCE_CLKODIV_LOAD_S 14 -#define NAC_CGU_DWORD10_SYNCE_CLKODIV_LOAD_M BIT(14) -#define NAC_CGU_DWORD10_SYNCE_DCK_RST_S 15 -#define NAC_CGU_DWORD10_SYNCE_DCK_RST_M BIT(15) -#define NAC_CGU_DWORD10_SYNCE_ETHCLKO_SEL_S 16 -#define NAC_CGU_DWORD10_SYNCE_ETHCLKO_SEL_M ICE_M(0x7, 16) -#define NAC_CGU_DWORD10_SYNCE_ETHDIV_M1_S 19 -#define NAC_CGU_DWORD10_SYNCE_ETHDIV_M1_M ICE_M(0x1f, 
19) -#define NAC_CGU_DWORD10_SYNCE_ETHDIV_LOAD_S 24 -#define NAC_CGU_DWORD10_SYNCE_ETHDIV_LOAD_M BIT(24) -#define NAC_CGU_DWORD10_SYNCE_DCK2_RST_S 25 -#define NAC_CGU_DWORD10_SYNCE_DCK2_RST_M BIT(25) -#define NAC_CGU_DWORD10_SYNCE_SEL_GND_S 26 -#define NAC_CGU_DWORD10_SYNCE_SEL_GND_M BIT(26) -#define NAC_CGU_DWORD10_SYNCE_S_REF_CLK_S 27 -#define NAC_CGU_DWORD10_SYNCE_S_REF_CLK_M ICE_M(0x1f, 27) - -union nac_cgu_dword10 { - struct { - u32 ja_pll_enable : 1; - u32 misc11 : 1; - u32 fdpll_enable : 1; - u32 fdpll_slow : 1; - u32 fdpll_lock_int_enb : 1; - u32 synce_clko_sel : 4; - u32 synce_clkodiv_m1 : 5; - u32 synce_clkodiv_load : 1; - u32 synce_dck_rst : 1; - u32 synce_ethclko_sel : 3; - u32 synce_ethdiv_m1 : 5; - u32 synce_ethdiv_load : 1; - u32 synce_dck2_rst : 1; - u32 synce_sel_gnd : 1; - u32 synce_s_ref_clk : 5; - } field; - u32 val; -}; - -#define NAC_CGU_DWORD11 0x2c -#define NAC_CGU_DWORD11_MISC25_S 0 -#define NAC_CGU_DWORD11_MISC25_M BIT(0) -#define NAC_CGU_DWORD11_SYNCE_S_BYP_CLK_S 1 -#define NAC_CGU_DWORD11_SYNCE_S_BYP_CLK_M ICE_M(0x3f, 1) -#define NAC_CGU_DWORD11_SYNCE_HDOV_MODE_S 7 -#define NAC_CGU_DWORD11_SYNCE_HDOV_MODE_M BIT(7) -#define NAC_CGU_DWORD11_SYNCE_RAT_SEL_S 8 -#define NAC_CGU_DWORD11_SYNCE_RAT_SEL_M ICE_M(0x3, 8) -#define NAC_CGU_DWORD11_SYNCE_LINK_ENABLE_S 10 -#define NAC_CGU_DWORD11_SYNCE_LINK_ENABLE_M ICE_M(0xfffff, 10) -#define NAC_CGU_DWORD11_SYNCE_MISCLK_EN_S 30 -#define NAC_CGU_DWORD11_SYNCE_MISCLK_EN_M BIT(30) -#define NAC_CGU_DWORD11_SYNCE_MISCLK_RAT_M1_S 31 -#define NAC_CGU_DWORD11_SYNCE_MISCLK_RAT_M1_M BIT(31) - -union nac_cgu_dword11 { - struct { - u32 misc25 : 1; - u32 synce_s_byp_clk : 6; - u32 synce_hdov_mode : 1; - u32 synce_rat_sel : 2; - u32 synce_link_enable : 20; - u32 synce_misclk_en : 1; - u32 synce_misclk_rat_m1 : 1; - } field; - u32 val; -}; - -#define NAC_CGU_DWORD12 0x30 -#define NAC_CGU_DWORD12_SYNCE_MISCLK_RAT_M1_S 0 -#define NAC_CGU_DWORD12_SYNCE_MISCLK_RAT_M1_M ICE_M(0x3ff, 0) -#define NAC_CGU_DWORD12_SYNCE_MCK_RST_S 10 -#define NAC_CGU_DWORD12_SYNCE_MCK_RST_M BIT(10) -#define NAC_CGU_DWORD12_SYNCE_DPLL_BYP_S 11 -#define NAC_CGU_DWORD12_SYNCE_DPLL_BYP_M BIT(11) -#define NAC_CGU_DWORD12_SYNCE_DV_RAT_M1_S 12 -#define NAC_CGU_DWORD12_SYNCE_DV_RAT_M1_M ICE_M(0x1fff, 12) -#define NAC_CGU_DWORD12_SYNCE_ML_RAT_M1_S 25 -#define NAC_CGU_DWORD12_SYNCE_ML_RAT_M1_M ICE_M(0x7f, 25) - -union nac_cgu_dword12 { - struct { - u32 synce_misclk_rat_m1 : 10; - u32 synce_mck_rst : 1; - u32 synce_dpll_byp : 1; - u32 synce_dv_rat_m1 : 13; - u32 synce_ml_rat_m1 : 7; - } field; - u32 val; -}; - -#define NAC_CGU_DWORD13 0x34 -#define NAC_CGU_DWORD13_SYNCE_ML_RAT_M1_S 0 -#define NAC_CGU_DWORD13_SYNCE_ML_RAT_M1_M ICE_M(0x1f, 0) -#define NAC_CGU_DWORD13_SYNCE_HDOV_CHANGED_S 5 -#define NAC_CGU_DWORD13_SYNCE_HDOV_CHANGED_M BIT(5) -#define NAC_CGU_DWORD13_SYNCE_LOCK_CHANGED_S 6 -#define NAC_CGU_DWORD13_SYNCE_LOCK_CHANGED_M BIT(6) -#define NAC_CGU_DWORD13_SYNCE_HDOV_S 7 -#define NAC_CGU_DWORD13_SYNCE_HDOV_M BIT(7) -#define NAC_CGU_DWORD13_SYNCE_HDOV_INT_ENB_S 8 -#define NAC_CGU_DWORD13_SYNCE_HDOV_INT_ENB_M BIT(8) -#define NAC_CGU_DWORD13_SYNCE_LOCK_INT_ENB_S 9 -#define NAC_CGU_DWORD13_SYNCE_LOCK_INT_ENB_M BIT(9) -#define NAC_CGU_DWORD13_SYNCE_LOCKED_NC_S 10 -#define NAC_CGU_DWORD13_SYNCE_LOCKED_NC_M BIT(10) -#define NAC_CGU_DWORD13_FDPLL_LOCKED_NC_S 11 -#define NAC_CGU_DWORD13_FDPLL_LOCKED_NC_M BIT(11) -#define NAC_CGU_DWORD13_SYNCE_LOCKED_CLEAR_S 12 -#define NAC_CGU_DWORD13_SYNCE_LOCKED_CLEAR_M BIT(12) -#define NAC_CGU_DWORD13_SYNCE_HDOV_CLEAR_S 13 -#define 
NAC_CGU_DWORD13_SYNCE_HDOV_CLEAR_M BIT(13) -#define NAC_CGU_DWORD13_FDPLL_LOCKED_CLEAR_S 14 -#define NAC_CGU_DWORD13_FDPLL_LOCKED_CLEAR_M BIT(14) -#define NAC_CGU_DWORD13_FDPLL_LOCK_CHANGED_S 15 -#define NAC_CGU_DWORD13_FDPLL_LOCK_CHANGED_M BIT(15) -#define NAC_CGU_DWORD13_RMNRXCLK_SEL_S 16 -#define NAC_CGU_DWORD13_RMNRXCLK_SEL_M ICE_M(0x1f, 16) -#define NAC_CGU_DWORD13_ENABLE_ETH_COUNT_S 21 -#define NAC_CGU_DWORD13_ENABLE_ETH_COUNT_M BIT(21) -#define NAC_CGU_DWORD13_ETH_COUNT_FAST_MODE_S 22 -#define NAC_CGU_DWORD13_ETH_COUNT_FAST_MODE_M BIT(22) -#define NAC_CGU_DWORD13_MISC12_S 23 -#define NAC_CGU_DWORD13_MISC12_M ICE_M(0x1ff, 23) - -union nac_cgu_dword13 { - struct { - u32 synce_ml_rat_m1 : 5; - u32 synce_hdov_changed : 1; - u32 synce_lock_changed : 1; - u32 synce_hdov : 1; - u32 synce_hdov_int_enb : 1; - u32 synce_lock_int_enb : 1; - u32 synce_locked_nc : 1; - u32 fdpll_locked_nc : 1; - u32 synce_locked_clear : 1; - u32 synce_hdov_clear : 1; - u32 fdpll_locked_clear : 1; - u32 fdpll_lock_changed : 1; - u32 rmnrxclk_sel : 5; - u32 enable_eth_count : 1; - u32 eth_count_fast_mode : 1; - u32 misc12 : 9; - } field; - u32 val; -}; - -#define NAC_CGU_DWORD14 0x38 -#define NAC_CGU_DWORD14_SYNCE_LNK_UP_MD_S 0 -#define NAC_CGU_DWORD14_SYNCE_LNK_UP_MD_M BIT(0) -#define NAC_CGU_DWORD14_SYNCE_LNK_DN_MD_S 1 -#define NAC_CGU_DWORD14_SYNCE_LNK_DN_MD_M BIT(1) -#define NAC_CGU_DWORD14_SYNCE_FAST_MODE_S 2 -#define NAC_CGU_DWORD14_SYNCE_FAST_MODE_M BIT(2) -#define NAC_CGU_DWORD14_SYNCE_EEC_MODE_S 3 -#define NAC_CGU_DWORD14_SYNCE_EEC_MODE_M BIT(3) -#define NAC_CGU_DWORD14_SYNCE_NGAIN_S 4 -#define NAC_CGU_DWORD14_SYNCE_NGAIN_M ICE_M(0xff, 4) -#define NAC_CGU_DWORD14_SYNCE_NSCALE_S 12 -#define NAC_CGU_DWORD14_SYNCE_NSCALE_M ICE_M(0x3f, 12) -#define NAC_CGU_DWORD14_SYNCE_UNLCK_THR_S 18 -#define NAC_CGU_DWORD14_SYNCE_UNLCK_THR_M ICE_M(0x3fff, 18) - -union nac_cgu_dword14 { - struct { - u32 synce_lnk_up_md : 1; - u32 synce_lnk_dn_md : 1; - u32 synce_fast_mode : 1; - u32 synce_eec_mode : 1; - u32 synce_ngain : 8; - u32 synce_nscale : 6; - u32 synce_unlck_thr : 14; - } field; - u32 val; -}; - -#define NAC_CGU_DWORD15 0x3c -#define NAC_CGU_DWORD15_SYNCE_UNLCK_THR_S 0 -#define NAC_CGU_DWORD15_SYNCE_UNLCK_THR_M ICE_M(0x7, 0) -#define NAC_CGU_DWORD15_SYNCE_LOCK_THR_S 3 -#define NAC_CGU_DWORD15_SYNCE_LOCK_THR_M ICE_M(0x1ffff, 3) -#define NAC_CGU_DWORD15_SYNCE_QUO_M1_S 20 -#define NAC_CGU_DWORD15_SYNCE_QUO_M1_M ICE_M(0x3f, 20) -#define NAC_CGU_DWORD15_SYNCE_REMNDR_S 26 -#define NAC_CGU_DWORD15_SYNCE_REMNDR_M ICE_M(0x3f, 26) - -union nac_cgu_dword15 { - struct { - u32 synce_unlck_thr : 3; - u32 synce_lock_thr : 17; - u32 synce_quo_m1 : 6; - u32 synce_remndr : 6; - } field; - u32 val; -}; - -#define NAC_CGU_DWORD16 0x40 -#define NAC_CGU_DWORD16_SYNCE_REMNDR_S 0 -#define NAC_CGU_DWORD16_SYNCE_REMNDR_M ICE_M(0x3f, 0) -#define NAC_CGU_DWORD16_SYNCE_PHLMT_EN_S 6 -#define NAC_CGU_DWORD16_SYNCE_PHLMT_EN_M BIT(6) -#define NAC_CGU_DWORD16_MISC13_S 7 -#define NAC_CGU_DWORD16_MISC13_M ICE_M(0x1ffffff, 7) - -union nac_cgu_dword16 { - struct { - u32 synce_remndr : 6; - u32 synce_phlmt_en : 1; - u32 misc13 : 25; - } field; - u32 val; -}; - -#define NAC_CGU_DWORD17 0x44 -#define NAC_CGU_DWORD17_FDPLL_GAIN_S 0 -#define NAC_CGU_DWORD17_FDPLL_GAIN_M ICE_M(0xf, 0) -#define NAC_CGU_DWORD17_FDPLL_SCALE_S 4 -#define NAC_CGU_DWORD17_FDPLL_SCALE_M ICE_M(0xf, 4) -#define NAC_CGU_DWORD17_FDPLL_FGAIN_SHIFT_F_S 8 -#define NAC_CGU_DWORD17_FDPLL_FGAIN_SHIFT_F_M ICE_M(0x3f, 8) -#define NAC_CGU_DWORD17_FDPLL_CLR_PHERR_S 14 -#define 
NAC_CGU_DWORD17_FDPLL_CLR_PHERR_M BIT(14) -#define NAC_CGU_DWORD17_FDPLL_BB_EN_S 15 -#define NAC_CGU_DWORD17_FDPLL_BB_EN_M BIT(15) -#define NAC_CGU_DWORD17_FDPLL_FGAIN_SHIFT_S 16 -#define NAC_CGU_DWORD17_FDPLL_FGAIN_SHIFT_M ICE_M(0x3f, 16) -#define NAC_CGU_DWORD17_FDPLL_FSCALE_SHIFT_S 22 -#define NAC_CGU_DWORD17_FDPLL_FSCALE_SHIFT_M ICE_M(0x1f, 22) -#define NAC_CGU_DWORD17_FDPLL_FSCALE_SHIFT_F_S 27 -#define NAC_CGU_DWORD17_FDPLL_FSCALE_SHIFT_F_M ICE_M(0x1f, 27) - -union nac_cgu_dword17 { - struct { - u32 fdpll_gain : 4; - u32 fdpll_scale : 4; - u32 fdpll_fgain_shift_f : 6; - u32 fdpll_clr_pherr : 1; - u32 fdpll_bb_en : 1; - u32 fdpll_fgain_shift : 6; - u32 fdpll_fscale_shift : 5; - u32 fdpll_fscale_shift_f : 5; - } field; - u32 val; -}; - -#define NAC_CGU_DWORD18 0x48 -#define NAC_CGU_DWORD18_FDPLL_BYPASS_S 0 -#define NAC_CGU_DWORD18_FDPLL_BYPASS_M BIT(0) -#define NAC_CGU_DWORD18_FDPLL_INP_NCO_S 1 -#define NAC_CGU_DWORD18_FDPLL_INP_NCO_M ICE_M(0xff, 1) -#define NAC_CGU_DWORD18_FDPLL_AUTO_EN_S 9 -#define NAC_CGU_DWORD18_FDPLL_AUTO_EN_M BIT(9) -#define NAC_CGU_DWORD18_FDPLL_SAMP_CNT_S 10 -#define NAC_CGU_DWORD18_FDPLL_SAMP_CNT_M ICE_M(0xfff, 10) -#define NAC_CGU_DWORD18_FDPLL_LOCKCNT_S 22 -#define NAC_CGU_DWORD18_FDPLL_LOCKCNT_M ICE_M(0x1f, 22) -#define NAC_CGU_DWORD18_FDPLL_LOCK_THR_S 27 -#define NAC_CGU_DWORD18_FDPLL_LOCK_THR_M ICE_M(0x1f, 27) - -union nac_cgu_dword18 { - struct { - u32 fdpll_bypass : 1; - u32 fdpll_inp_nco : 8; - u32 fdpll_auto_en : 1; - u32 fdpll_samp_cnt : 12; - u32 fdpll_lockcnt : 5; - u32 fdpll_lock_thr : 5; - } field; - u32 val; -}; - #define NAC_CGU_DWORD19 0x4c -#define NAC_CGU_DWORD19_TSPLL_FBDIV_INTGR_S 0 -#define NAC_CGU_DWORD19_TSPLL_FBDIV_INTGR_M ICE_M(0xff, 0) -#define NAC_CGU_DWORD19_FDPLL_ULCK_THR_S 8 -#define NAC_CGU_DWORD19_FDPLL_ULCK_THR_M ICE_M(0x1f, 8) -#define NAC_CGU_DWORD19_MISC15_S 13 -#define NAC_CGU_DWORD19_MISC15_M ICE_M(0x7, 13) -#define NAC_CGU_DWORD19_TSPLL_NDIVRATIO_S 16 -#define NAC_CGU_DWORD19_TSPLL_NDIVRATIO_M ICE_M(0xf, 16) -#define NAC_CGU_DWORD19_TSPLL_IREF_NDIVRATIO_S 20 -#define NAC_CGU_DWORD19_TSPLL_IREF_NDIVRATIO_M ICE_M(0x7, 20) -#define NAC_CGU_DWORD19_MISC19_S 23 -#define NAC_CGU_DWORD19_MISC19_M BIT(23) -#define NAC_CGU_DWORD19_JAPLL_NDIVRATIO_S 24 -#define NAC_CGU_DWORD19_JAPLL_NDIVRATIO_M ICE_M(0xf, 24) -#define NAC_CGU_DWORD19_JAPLL_IREF_NDIVRATIO_S 28 -#define NAC_CGU_DWORD19_JAPLL_IREF_NDIVRATIO_M ICE_M(0x7, 28) -#define NAC_CGU_DWORD19_MISC27_S 31 -#define NAC_CGU_DWORD19_MISC27_M BIT(31) - union nac_cgu_dword19 { struct { u32 tspll_fbdiv_intgr : 8; @@ -441,76 +43,7 @@ union nac_cgu_dword19 { u32 val; }; -#define NAC_CGU_DWORD20 0x50 -#define NAC_CGU_DWORD20_JAPLL_INT_DIV_S 0 -#define NAC_CGU_DWORD20_JAPLL_INT_DIV_M ICE_M(0xff, 0) -#define NAC_CGU_DWORD20_JAPLL_FRAC_DIV_S 8 -#define NAC_CGU_DWORD20_JAPLL_FRAC_DIV_M ICE_M(0x3fffff, 8) -#define NAC_CGU_DWORD20_MISC16_S 30 -#define NAC_CGU_DWORD20_MISC16_M ICE_M(0x3, 30) - -union nac_cgu_dword20 { - struct { - u32 japll_int_div : 8; - u32 japll_frac_div : 22; - u32 misc16 : 2; - } field; - u32 val; -}; - -#define NAC_CGU_DWORD21 0x54 -#define NAC_CGU_DWORD21_MISC17_S 0 -#define NAC_CGU_DWORD21_MISC17_M ICE_M(0xf, 0) -#define NAC_CGU_DWORD21_FDPLL_INT_DIV_OUT_NC_S 4 -#define NAC_CGU_DWORD21_FDPLL_INT_DIV_OUT_NC_M ICE_M(0xff, 4) -#define NAC_CGU_DWORD21_FDPLL_FRAC_DIV_OUT_NC_S 12 -#define NAC_CGU_DWORD21_FDPLL_FRAC_DIV_OUT_NC_M ICE_M(0xfffff, 12) - -union nac_cgu_dword21 { - struct { - u32 misc17 : 4; - u32 fdpll_int_div_out_nc : 8; - u32 fdpll_frac_div_out_nc : 20; - } field; 
- u32 val; -}; - #define NAC_CGU_DWORD22 0x58 -#define NAC_CGU_DWORD22_FDPLL_FRAC_DIV_OUT_NC_S 0 -#define NAC_CGU_DWORD22_FDPLL_FRAC_DIV_OUT_NC_M ICE_M(0x3, 0) -#define NAC_CGU_DWORD22_FDPLL_LOCK_INT_FOR_S 2 -#define NAC_CGU_DWORD22_FDPLL_LOCK_INT_FOR_M BIT(2) -#define NAC_CGU_DWORD22_SYNCE_HDOV_INT_FOR_S 3 -#define NAC_CGU_DWORD22_SYNCE_HDOV_INT_FOR_M BIT(3) -#define NAC_CGU_DWORD22_SYNCE_LOCK_INT_FOR_S 4 -#define NAC_CGU_DWORD22_SYNCE_LOCK_INT_FOR_M BIT(4) -#define NAC_CGU_DWORD22_FDPLL_PHLEAD_SLIP_NC_S 5 -#define NAC_CGU_DWORD22_FDPLL_PHLEAD_SLIP_NC_M BIT(5) -#define NAC_CGU_DWORD22_FDPLL_ACC1_OVFL_NC_S 6 -#define NAC_CGU_DWORD22_FDPLL_ACC1_OVFL_NC_M BIT(6) -#define NAC_CGU_DWORD22_FDPLL_ACC2_OVFL_NC_S 7 -#define NAC_CGU_DWORD22_FDPLL_ACC2_OVFL_NC_M BIT(7) -#define NAC_CGU_DWORD22_SYNCE_STATUS_NC_S 8 -#define NAC_CGU_DWORD22_SYNCE_STATUS_NC_M ICE_M(0x3f, 8) -#define NAC_CGU_DWORD22_FDPLL_ACC1F_OVFL_S 14 -#define NAC_CGU_DWORD22_FDPLL_ACC1F_OVFL_M BIT(14) -#define NAC_CGU_DWORD22_MISC18_S 15 -#define NAC_CGU_DWORD22_MISC18_M BIT(15) -#define NAC_CGU_DWORD22_FDPLLCLK_DIV_S 16 -#define NAC_CGU_DWORD22_FDPLLCLK_DIV_M ICE_M(0xf, 16) -#define NAC_CGU_DWORD22_TIME1588CLK_DIV_S 20 -#define NAC_CGU_DWORD22_TIME1588CLK_DIV_M ICE_M(0xf, 20) -#define NAC_CGU_DWORD22_SYNCECLK_DIV_S 24 -#define NAC_CGU_DWORD22_SYNCECLK_DIV_M ICE_M(0xf, 24) -#define NAC_CGU_DWORD22_SYNCECLK_SEL_DIV2_S 28 -#define NAC_CGU_DWORD22_SYNCECLK_SEL_DIV2_M BIT(28) -#define NAC_CGU_DWORD22_FDPLLCLK_SEL_DIV2_S 29 -#define NAC_CGU_DWORD22_FDPLLCLK_SEL_DIV2_M BIT(29) -#define NAC_CGU_DWORD22_TIME1588CLK_SEL_DIV2_S 30 -#define NAC_CGU_DWORD22_TIME1588CLK_SEL_DIV2_M BIT(30) -#define NAC_CGU_DWORD22_MISC3_S 31 -#define NAC_CGU_DWORD22_MISC3_M BIT(31) - union nac_cgu_dword22 { struct { u32 fdpll_frac_div_out_nc : 2; @@ -535,21 +68,6 @@ union nac_cgu_dword22 { }; #define NAC_CGU_DWORD24 0x60 -#define NAC_CGU_DWORD24_TSPLL_FBDIV_FRAC_S 0 -#define NAC_CGU_DWORD24_TSPLL_FBDIV_FRAC_M ICE_M(0x3fffff, 0) -#define NAC_CGU_DWORD24_MISC20_S 22 -#define NAC_CGU_DWORD24_MISC20_M ICE_M(0x3, 22) -#define NAC_CGU_DWORD24_TS_PLL_ENABLE_S 24 -#define NAC_CGU_DWORD24_TS_PLL_ENABLE_M BIT(24) -#define NAC_CGU_DWORD24_TIME_SYNC_TSPLL_ALIGN_SEL_S 25 -#define NAC_CGU_DWORD24_TIME_SYNC_TSPLL_ALIGN_SEL_M BIT(25) -#define NAC_CGU_DWORD24_EXT_SYNCE_SEL_S 26 -#define NAC_CGU_DWORD24_EXT_SYNCE_SEL_M BIT(26) -#define NAC_CGU_DWORD24_REF1588_CK_DIV_S 27 -#define NAC_CGU_DWORD24_REF1588_CK_DIV_M ICE_M(0xf, 27) -#define NAC_CGU_DWORD24_TIME_REF_SEL_S 31 -#define NAC_CGU_DWORD24_TIME_REF_SEL_M BIT(31) - union nac_cgu_dword24 { struct { u32 tspll_fbdiv_frac : 22; @@ -564,25 +82,6 @@ union nac_cgu_dword24 { }; #define TSPLL_CNTR_BIST_SETTINGS 0x344 -#define TSPLL_CNTR_BIST_SETTINGS_I_IREFGEN_SETTLING_TIME_CNTR_7_0_S 0 -#define TSPLL_CNTR_BIST_SETTINGS_I_IREFGEN_SETTLING_TIME_CNTR_7_0_M \ - ICE_M(0xff, 0) -#define TSPLL_CNTR_BIST_SETTINGS_I_IREFGEN_SETTLING_TIME_RO_STANDBY_1_0_S 8 -#define TSPLL_CNTR_BIST_SETTINGS_I_IREFGEN_SETTLING_TIME_RO_STANDBY_1_0_M \ - ICE_M(0x3, 8) -#define TSPLL_CNTR_BIST_SETTINGS_RESERVED195_S 10 -#define TSPLL_CNTR_BIST_SETTINGS_RESERVED195_M ICE_M(0x1f, 10) -#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_SEL_0_S 15 -#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_SEL_0_M BIT(15) -#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_SEL_1_S 16 -#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_SEL_1_M BIT(16) -#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_CNT_6_0_S 17 -#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_CNT_6_0_M ICE_M(0x7f, 17) -#define 
TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_CNT_10_7_S 24 -#define TSPLL_CNTR_BIST_SETTINGS_I_PLLLOCK_CNT_10_7_M ICE_M(0xf, 24) -#define TSPLL_CNTR_BIST_SETTINGS_RESERVED200_S 28 -#define TSPLL_CNTR_BIST_SETTINGS_RESERVED200_M ICE_M(0xf, 28) - union tspll_cntr_bist_settings { struct { u32 i_irefgen_settling_time_cntr_7_0 : 8; @@ -598,27 +97,6 @@ union tspll_cntr_bist_settings { }; #define TSPLL_RO_BWM_LF 0x370 -#define TSPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_7_0_S 0 -#define TSPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_7_0_M ICE_M(0xff, 0) -#define TSPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_9_8_S 8 -#define TSPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_9_8_M ICE_M(0x3, 8) -#define TSPLL_RO_BWM_LF_BIASCALDONE_CRI_S 10 -#define TSPLL_RO_BWM_LF_BIASCALDONE_CRI_M BIT(10) -#define TSPLL_RO_BWM_LF_PLLLOCK_GAIN_TRAN_CRI_S 11 -#define TSPLL_RO_BWM_LF_PLLLOCK_GAIN_TRAN_CRI_M BIT(11) -#define TSPLL_RO_BWM_LF_PLLLOCK_TRUE_LOCK_CRI_S 12 -#define TSPLL_RO_BWM_LF_PLLLOCK_TRUE_LOCK_CRI_M BIT(12) -#define TSPLL_RO_BWM_LF_PLLUNLOCK_FLAG_CRI_S 13 -#define TSPLL_RO_BWM_LF_PLLUNLOCK_FLAG_CRI_M BIT(13) -#define TSPLL_RO_BWM_LF_AFCERR_CRI_S 14 -#define TSPLL_RO_BWM_LF_AFCERR_CRI_M BIT(14) -#define TSPLL_RO_BWM_LF_AFCDONE_CRI_S 15 -#define TSPLL_RO_BWM_LF_AFCDONE_CRI_M BIT(15) -#define TSPLL_RO_BWM_LF_FEEDFWRDGAIN_CAL_CRI_7_0_S 16 -#define TSPLL_RO_BWM_LF_FEEDFWRDGAIN_CAL_CRI_7_0_M ICE_M(0xff, 16) -#define TSPLL_RO_BWM_LF_M2FBDIVMOD_CRI_7_0_S 24 -#define TSPLL_RO_BWM_LF_M2FBDIVMOD_CRI_7_0_M ICE_M(0xff, 24) - union tspll_ro_bwm_lf { struct { u32 bw_freqov_high_cri_7_0 : 8; @@ -635,307 +113,4 @@ union tspll_ro_bwm_lf { u32 val; }; -#define JAPLL_DIV0 0x400 -#define JAPLL_DIV0_I_FBDIV_INTGR_7_0_S 0 -#define JAPLL_DIV0_I_FBDIV_INTGR_7_0_M ICE_M(0xff, 0) -#define JAPLL_DIV0_I_FBDIV_FRAC_7_0_S 8 -#define JAPLL_DIV0_I_FBDIV_FRAC_7_0_M ICE_M(0xff, 8) -#define JAPLL_DIV0_I_FBDIV_FRAC_15_8_S 16 -#define JAPLL_DIV0_I_FBDIV_FRAC_15_8_M ICE_M(0xff, 16) -#define JAPLL_DIV0_I_FBDIV_FRAC_21_16_S 24 -#define JAPLL_DIV0_I_FBDIV_FRAC_21_16_M ICE_M(0x3f, 24) -#define JAPLL_DIV0_I_FRACNEN_H_S 30 -#define JAPLL_DIV0_I_FRACNEN_H_M BIT(30) -#define JAPLL_DIV0_I_DIRECT_PIN_IF_EN_S 31 -#define JAPLL_DIV0_I_DIRECT_PIN_IF_EN_M BIT(31) - -union japll_div0 { - struct { - u32 i_fbdiv_intgr_7_0 : 8; - u32 i_fbdiv_frac_7_0 : 8; - u32 i_fbdiv_frac_15_8 : 8; - u32 i_fbdiv_frac_21_16 : 6; - u32 i_fracnen_h : 1; - u32 i_direct_pin_if_en : 1; - } field; - u32 val; -}; - -#define JAPLL_LF 0x408 -#define JAPLL_LF_I_PROP_COEFF_3_0_S 0 -#define JAPLL_LF_I_PROP_COEFF_3_0_M ICE_M(0xf, 0) -#define JAPLL_LF_I_FLL_INT_COEFF_3_0_S 4 -#define JAPLL_LF_I_FLL_INT_COEFF_3_0_M ICE_M(0xf, 4) -#define JAPLL_LF_I_INT_COEFF_4_0_S 8 -#define JAPLL_LF_I_INT_COEFF_4_0_M ICE_M(0x1f, 8) -#define JAPLL_LF_I_FLL_EN_H_S 13 -#define JAPLL_LF_I_FLL_EN_H_M BIT(13) -#define JAPLL_LF_I_TDC_FINE_RES_S 14 -#define JAPLL_LF_I_TDC_FINE_RES_M BIT(14) -#define JAPLL_LF_I_DCOFINE_RESOLUTION_S 15 -#define JAPLL_LF_I_DCOFINE_RESOLUTION_M BIT(15) -#define JAPLL_LF_I_GAINCTRL_2_0_S 16 -#define JAPLL_LF_I_GAINCTRL_2_0_M ICE_M(0x7, 16) -#define JAPLL_LF_I_AFC_DIVRATIO_S 19 -#define JAPLL_LF_I_AFC_DIVRATIO_M BIT(19) -#define JAPLL_LF_I_AFCCNTSEL_S 20 -#define JAPLL_LF_I_AFCCNTSEL_M BIT(20) -#define JAPLL_LF_I_AFC_STARTUP_1_0_S 21 -#define JAPLL_LF_I_AFC_STARTUP_1_0_M ICE_M(0x3, 21) -#define JAPLL_LF_RESERVED31_S 23 -#define JAPLL_LF_RESERVED31_M BIT(23) -#define JAPLL_LF_I_TDCTARGETCNT_7_0_S 24 -#define JAPLL_LF_I_TDCTARGETCNT_7_0_M ICE_M(0xff, 24) - -union japll_lf { - struct { - u32 i_prop_coeff_3_0 : 4; - u32 i_fll_int_coeff_3_0 : 4; 
- u32 i_int_coeff_4_0 : 5; - u32 i_fll_en_h : 1; - u32 i_tdc_fine_res : 1; - u32 i_dcofine_resolution : 1; - u32 i_gainctrl_2_0 : 3; - u32 i_afc_divratio : 1; - u32 i_afccntsel : 1; - u32 i_afc_startup_1_0 : 2; - u32 reserved31 : 1; - u32 i_tdctargetcnt_7_0 : 8; - } field; - u32 val; -}; - -#define JAPLL_FRAC_LOCK 0x40c -#define JAPLL_FRAC_LOCK_I_FEEDFWRDGAIN_7_0_S 0 -#define JAPLL_FRAC_LOCK_I_FEEDFWRDGAIN_7_0_M ICE_M(0xff, 0) -#define JAPLL_FRAC_LOCK_I_FEEDFWRDCAL_EN_H_S 8 -#define JAPLL_FRAC_LOCK_I_FEEDFWRDCAL_EN_H_M BIT(8) -#define JAPLL_FRAC_LOCK_I_FEEDFWRDCAL_PAUSE_H_S 9 -#define JAPLL_FRAC_LOCK_I_FEEDFWRDCAL_PAUSE_H_M BIT(9) -#define JAPLL_FRAC_LOCK_I_DCODITHEREN_H_S 10 -#define JAPLL_FRAC_LOCK_I_DCODITHEREN_H_M BIT(10) -#define JAPLL_FRAC_LOCK_I_LOCKTHRESH_3_0_S 11 -#define JAPLL_FRAC_LOCK_I_LOCKTHRESH_3_0_M ICE_M(0xf, 11) -#define JAPLL_FRAC_LOCK_I_DCODITHER_CONFIG_S 15 -#define JAPLL_FRAC_LOCK_I_DCODITHER_CONFIG_M BIT(15) -#define JAPLL_FRAC_LOCK_I_EARLYLOCK_CRITERIA_1_0_S 16 -#define JAPLL_FRAC_LOCK_I_EARLYLOCK_CRITERIA_1_0_M ICE_M(0x3, 16) -#define JAPLL_FRAC_LOCK_I_TRUELOCK_CRITERIA_1_0_S 18 -#define JAPLL_FRAC_LOCK_I_TRUELOCK_CRITERIA_1_0_M ICE_M(0x3, 18) -#define JAPLL_FRAC_LOCK_I_LF_HALF_CYC_EN_S 20 -#define JAPLL_FRAC_LOCK_I_LF_HALF_CYC_EN_M BIT(20) -#define JAPLL_FRAC_LOCK_I_DITHER_OVRD_S 21 -#define JAPLL_FRAC_LOCK_I_DITHER_OVRD_M BIT(21) -#define JAPLL_FRAC_LOCK_I_PLLLC_RESTORE_REG_S 22 -#define JAPLL_FRAC_LOCK_I_PLLLC_RESTORE_REG_M BIT(22) -#define JAPLL_FRAC_LOCK_I_PLLLC_RESTORE_MODE_CTRL_S 23 -#define JAPLL_FRAC_LOCK_I_PLLLC_RESTORE_MODE_CTRL_M BIT(23) -#define JAPLL_FRAC_LOCK_I_PLLRAMPEN_H_S 24 -#define JAPLL_FRAC_LOCK_I_PLLRAMPEN_H_M BIT(24) -#define JAPLL_FRAC_LOCK_I_FBDIV_STROBE_H_S 25 -#define JAPLL_FRAC_LOCK_I_FBDIV_STROBE_H_M BIT(25) -#define JAPLL_FRAC_LOCK_I_OVC_SNAPSHOT_H_S 26 -#define JAPLL_FRAC_LOCK_I_OVC_SNAPSHOT_H_M BIT(26) -#define JAPLL_FRAC_LOCK_I_DITHER_VALUE_4_0_S 27 -#define JAPLL_FRAC_LOCK_I_DITHER_VALUE_4_0_M ICE_M(0x1f, 27) - -union japll_frac_lock { - struct { - u32 i_feedfwrdgain_7_0 : 8; - u32 i_feedfwrdcal_en_h : 1; - u32 i_feedfwrdcal_pause_h : 1; - u32 i_dcoditheren_h : 1; - u32 i_lockthresh_3_0 : 4; - u32 i_dcodither_config : 1; - u32 i_earlylock_criteria_1_0 : 2; - u32 i_truelock_criteria_1_0 : 2; - u32 i_lf_half_cyc_en : 1; - u32 i_dither_ovrd : 1; - u32 i_plllc_restore_reg : 1; - u32 i_plllc_restore_mode_ctrl : 1; - u32 i_pllrampen_h : 1; - u32 i_fbdiv_strobe_h : 1; - u32 i_ovc_snapshot_h : 1; - u32 i_dither_value_4_0 : 5; - } field; - u32 val; -}; - -#define JAPLL_BIAS 0x414 -#define JAPLL_BIAS_I_IREFTRIM_4_0_S 0 -#define JAPLL_BIAS_I_IREFTRIM_4_0_M ICE_M(0x1f, 0) -#define JAPLL_BIAS_I_VREF_RDAC_2_0_S 5 -#define JAPLL_BIAS_I_VREF_RDAC_2_0_M ICE_M(0x7, 5) -#define JAPLL_BIAS_I_CTRIM_4_0_S 8 -#define JAPLL_BIAS_I_CTRIM_4_0_M ICE_M(0x1f, 8) -#define JAPLL_BIAS_I_IREF_REFCLK_MODE_1_0_S 13 -#define JAPLL_BIAS_I_IREF_REFCLK_MODE_1_0_M ICE_M(0x3, 13) -#define JAPLL_BIAS_I_BIASCAL_EN_H_S 15 -#define JAPLL_BIAS_I_BIASCAL_EN_H_M BIT(15) -#define JAPLL_BIAS_I_BIAS_BONUS_7_0_S 16 -#define JAPLL_BIAS_I_BIAS_BONUS_7_0_M ICE_M(0xff, 16) -#define JAPLL_BIAS_I_INIT_DCOAMP_5_0_S 24 -#define JAPLL_BIAS_I_INIT_DCOAMP_5_0_M ICE_M(0x3f, 24) -#define JAPLL_BIAS_I_BIAS_GB_SEL_1_0_S 30 -#define JAPLL_BIAS_I_BIAS_GB_SEL_1_0_M ICE_M(0x3, 30) - -union japll_bias { - struct { - u32 i_ireftrim_4_0 : 5; - u32 i_vref_rdac_2_0 : 3; - u32 i_ctrim_4_0 : 5; - u32 i_iref_refclk_mode_1_0 : 2; - u32 i_biascal_en_h : 1; - u32 i_bias_bonus_7_0 : 8; - u32 i_init_dcoamp_5_0 : 
6; - u32 i_bias_gb_sel_1_0 : 2; - } field; - u32 val; -}; - -#define JAPLL_TDC_COLDST_BIAS 0x418 -#define JAPLL_TDC_COLDST_BIAS_I_TDCSEL_1_0_S 0 -#define JAPLL_TDC_COLDST_BIAS_I_TDCSEL_1_0_M ICE_M(0x3, 0) -#define JAPLL_TDC_COLDST_BIAS_I_TDCOVCCORR_EN_H_S 2 -#define JAPLL_TDC_COLDST_BIAS_I_TDCOVCCORR_EN_H_M BIT(2) -#define JAPLL_TDC_COLDST_BIAS_I_TDCDC_EN_H_S 3 -#define JAPLL_TDC_COLDST_BIAS_I_TDCDC_EN_H_M BIT(3) -#define JAPLL_TDC_COLDST_BIAS_I_TDC_OFFSET_LOCK_1_0_S 4 -#define JAPLL_TDC_COLDST_BIAS_I_TDC_OFFSET_LOCK_1_0_M ICE_M(0x3, 4) -#define JAPLL_TDC_COLDST_BIAS_I_SWCAP_IREFGEN_CLKMODE_1_0_S 6 -#define JAPLL_TDC_COLDST_BIAS_I_SWCAP_IREFGEN_CLKMODE_1_0_M ICE_M(0x3, 6) -#define JAPLL_TDC_COLDST_BIAS_I_BB_GAIN_2_0_S 8 -#define JAPLL_TDC_COLDST_BIAS_I_BB_GAIN_2_0_M ICE_M(0x7, 8) -#define JAPLL_TDC_COLDST_BIAS_I_BBTHRESH_3_0_S 11 -#define JAPLL_TDC_COLDST_BIAS_I_BBTHRESH_3_0_M ICE_M(0xf, 11) -#define JAPLL_TDC_COLDST_BIAS_I_BBINLOCK_H_S 15 -#define JAPLL_TDC_COLDST_BIAS_I_BBINLOCK_H_M BIT(15) -#define JAPLL_TDC_COLDST_BIAS_I_COLDSTART_S 16 -#define JAPLL_TDC_COLDST_BIAS_I_COLDSTART_M BIT(16) -#define JAPLL_TDC_COLDST_BIAS_I_IREFBIAS_STARTUP_PULSE_WIDTH_1_0_S 17 -#define JAPLL_TDC_COLDST_BIAS_I_IREFBIAS_STARTUP_PULSE_WIDTH_1_0_M \ - ICE_M(0x3, 17) -#define JAPLL_TDC_COLDST_BIAS_I_DCO_SETTLING_TIME_CNTR_3_0_S 19 -#define JAPLL_TDC_COLDST_BIAS_I_DCO_SETTLING_TIME_CNTR_3_0_M ICE_M(0xf, 19) -#define JAPLL_TDC_COLDST_BIAS_I_IREFBIAS_STARTUP_PULSE_BYPASS_S 23 -#define JAPLL_TDC_COLDST_BIAS_I_IREFBIAS_STARTUP_PULSE_BYPASS_M BIT(23) -#define JAPLL_TDC_COLDST_BIAS_I_BIAS_CALIB_STEPSIZE_1_0_S 24 -#define JAPLL_TDC_COLDST_BIAS_I_BIAS_CALIB_STEPSIZE_1_0_M ICE_M(0x3, 24) -#define JAPLL_TDC_COLDST_BIAS_RESERVED81_S 26 -#define JAPLL_TDC_COLDST_BIAS_RESERVED81_M BIT(26) -#define JAPLL_TDC_COLDST_BIAS_I_IREFINT_EN_S 27 -#define JAPLL_TDC_COLDST_BIAS_I_IREFINT_EN_M BIT(27) -#define JAPLL_TDC_COLDST_BIAS_I_VGSBUFEN_S 28 -#define JAPLL_TDC_COLDST_BIAS_I_VGSBUFEN_M BIT(28) -#define JAPLL_TDC_COLDST_BIAS_I_DIGDFTSWEP_S 29 -#define JAPLL_TDC_COLDST_BIAS_I_DIGDFTSWEP_M BIT(29) -#define JAPLL_TDC_COLDST_BIAS_I_IREFDIGDFTEN_S 30 -#define JAPLL_TDC_COLDST_BIAS_I_IREFDIGDFTEN_M BIT(30) -#define JAPLL_TDC_COLDST_BIAS_I_IREF_REFCLK_INV_EN_S 31 -#define JAPLL_TDC_COLDST_BIAS_I_IREF_REFCLK_INV_EN_M BIT(31) - -union japll_tdc_coldst_bias { - struct { - u32 i_tdcsel_1_0 : 2; - u32 i_tdcovccorr_en_h : 1; - u32 i_tdcdc_en_h : 1; - u32 i_tdc_offset_lock_1_0 : 2; - u32 i_swcap_irefgen_clkmode_1_0 : 2; - u32 i_bb_gain_2_0 : 3; - u32 i_bbthresh_3_0 : 4; - u32 i_bbinlock_h : 1; - u32 i_coldstart : 1; - u32 i_irefbias_startup_pulse_width_1_0 : 2; - u32 i_dco_settling_time_cntr_3_0 : 4; - u32 i_irefbias_startup_pulse_bypass : 1; - u32 i_bias_calib_stepsize_1_0 : 2; - u32 reserved81 : 1; - u32 i_irefint_en : 1; - u32 i_vgsbufen : 1; - u32 i_digdftswep : 1; - u32 i_irefdigdften : 1; - u32 i_iref_refclk_inv_en : 1; - } field; - u32 val; -}; - -#define JAPLL_DFX_DCO 0x424 -#define JAPLL_DFX_DCO_I_DCOFINEDFTSEL_1_0_S 0 -#define JAPLL_DFX_DCO_I_DCOFINEDFTSEL_1_0_M ICE_M(0x3, 0) -#define JAPLL_DFX_DCO_I_DCOCOARSE_OVRD_H_S 2 -#define JAPLL_DFX_DCO_I_DCOCOARSE_OVRD_H_M BIT(2) -#define JAPLL_DFX_DCO_I_BIAS_FILTER_EN_S 3 -#define JAPLL_DFX_DCO_I_BIAS_FILTER_EN_M BIT(3) -#define JAPLL_DFX_DCO_I_PLLPWRMODE_1_0_S 4 -#define JAPLL_DFX_DCO_I_PLLPWRMODE_1_0_M ICE_M(0x3, 4) -#define JAPLL_DFX_DCO_I_DCOAMP_STATICLEG_CFG_1_0_S 6 -#define JAPLL_DFX_DCO_I_DCOAMP_STATICLEG_CFG_1_0_M ICE_M(0x3, 6) -#define JAPLL_DFX_DCO_I_DCOFINE_7_0_S 8 -#define 
JAPLL_DFX_DCO_I_DCOFINE_7_0_M ICE_M(0xff, 8) -#define JAPLL_DFX_DCO_I_DCOFINE_9_8_S 16 -#define JAPLL_DFX_DCO_I_DCOFINE_9_8_M ICE_M(0x3, 16) -#define JAPLL_DFX_DCO_I_DCOAMPOVRDEN_H_S 18 -#define JAPLL_DFX_DCO_I_DCOAMPOVRDEN_H_M BIT(18) -#define JAPLL_DFX_DCO_I_DCOAMP_3_0_S 19 -#define JAPLL_DFX_DCO_I_DCOAMP_3_0_M ICE_M(0xf, 19) -#define JAPLL_DFX_DCO_I_BIASFILTER_EN_DELAY_S 23 -#define JAPLL_DFX_DCO_I_BIASFILTER_EN_DELAY_M BIT(23) -#define JAPLL_DFX_DCO_I_DCOCOARSE_7_0_S 24 -#define JAPLL_DFX_DCO_I_DCOCOARSE_7_0_M ICE_M(0xff, 24) - -union japll_dfx_dco { - struct { - u32 i_dcofinedftsel_1_0 : 2; - u32 i_dcocoarse_ovrd_h : 1; - u32 i_bias_filter_en : 1; - u32 i_pllpwrmode_1_0 : 2; - u32 i_dcoamp_staticleg_cfg_1_0 : 2; - u32 i_dcofine_7_0 : 8; - u32 i_dcofine_9_8 : 2; - u32 i_dcoampovrden_h : 1; - u32 i_dcoamp_3_0 : 4; - u32 i_biasfilter_en_delay : 1; - u32 i_dcocoarse_7_0 : 8; - } field; - u32 val; -}; - -#define JAPLL_RO_BWM_LF 0x470 -#define JAPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_7_0_S 0 -#define JAPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_7_0_M ICE_M(0xff, 0) -#define JAPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_9_8_S 8 -#define JAPLL_RO_BWM_LF_BW_FREQOV_HIGH_CRI_9_8_M ICE_M(0x3, 8) -#define JAPLL_RO_BWM_LF_BIASCALDONE_CRI_S 10 -#define JAPLL_RO_BWM_LF_BIASCALDONE_CRI_M BIT(10) -#define JAPLL_RO_BWM_LF_PLLLOCK_GAIN_TRAN_CRI_S 11 -#define JAPLL_RO_BWM_LF_PLLLOCK_GAIN_TRAN_CRI_M BIT(11) -#define JAPLL_RO_BWM_LF_PLLLOCK_TRUE_LOCK_CRI_S 12 -#define JAPLL_RO_BWM_LF_PLLLOCK_TRUE_LOCK_CRI_M BIT(12) -#define JAPLL_RO_BWM_LF_PLLUNLOCK_FLAG_CRI_S 13 -#define JAPLL_RO_BWM_LF_PLLUNLOCK_FLAG_CRI_M BIT(13) -#define JAPLL_RO_BWM_LF_AFCERR_CRI_S 14 -#define JAPLL_RO_BWM_LF_AFCERR_CRI_M BIT(14) -#define JAPLL_RO_BWM_LF_AFCDONE_CRI_S 15 -#define JAPLL_RO_BWM_LF_AFCDONE_CRI_M BIT(15) -#define JAPLL_RO_BWM_LF_FEEDFWRDGAIN_CAL_CRI_7_0_S 16 -#define JAPLL_RO_BWM_LF_FEEDFWRDGAIN_CAL_CRI_7_0_M ICE_M(0xff, 16) -#define JAPLL_RO_BWM_LF_M2FBDIVMOD_CRI_7_0_S 24 -#define JAPLL_RO_BWM_LF_M2FBDIVMOD_CRI_7_0_M ICE_M(0xff, 24) - -union japll_ro_bwm_lf { - struct { - u32 bw_freqov_high_cri_7_0 : 8; - u32 bw_freqov_high_cri_9_8 : 2; - u32 biascaldone_cri : 1; - u32 plllock_gain_tran_cri : 1; - u32 plllock_true_lock_cri : 1; - u32 pllunlock_flag_cri : 1; - u32 afcerr_cri : 1; - u32 afcdone_cri : 1; - u32 feedfwrdgain_cal_cri_7_0 : 8; - u32 m2fbdivmod_cri_7_0 : 8; - } field; - u32 val; -}; - #endif /* _ICE_CGU_REGS_H_ */ diff --git a/drivers/thirdparty/ice/ice_cgu_util.c b/drivers/thirdparty/ice/ice_cgu_util.c deleted file mode 100644 index 75561d5c26fd..000000000000 --- a/drivers/thirdparty/ice/ice_cgu_util.c +++ /dev/null @@ -1,444 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2018-2021, Intel Corporation. 
*/ - -#include "ice.h" - -/** - * ice_cgu_reg_read - Read a CGU register - * @pf: Board private structure - * @reg: Register to read from - * @val: Pointer to the value to read (out param) - */ -int ice_cgu_reg_read(struct ice_pf *pf, u32 reg, u32 *val) -{ - struct ice_sbq_msg_input cgu_msg; - int status; - - cgu_msg.opcode = ice_sbq_msg_rd; - cgu_msg.dest_dev = cgu; - cgu_msg.msg_addr_low = reg; - cgu_msg.msg_addr_high = 0x0; - - status = ice_sbq_rw_reg_lp(&pf->hw, &cgu_msg, true); - if (status) { - dev_dbg(ice_pf_to_dev(pf), "addr 0x%04x, val 0x%08x\n", reg, cgu_msg.data); - return -EIO; - } - - *val = cgu_msg.data; - - return 0; -} - -/** - * ice_cgu_reg_write - Write a CGU register with lock parameter - * @pf: Board private structure - * @reg: Register to write to - * @val: Value to write - */ -int ice_cgu_reg_write(struct ice_pf *pf, u32 reg, u32 val) -{ - struct ice_sbq_msg_input cgu_msg; - int status; - - cgu_msg.opcode = ice_sbq_msg_wr; - cgu_msg.dest_dev = cgu; - cgu_msg.msg_addr_low = reg; - cgu_msg.msg_addr_high = 0x0; - cgu_msg.data = val; - - dev_dbg(ice_pf_to_dev(pf), "addr 0x%04x, val 0x%08x\n", reg, val); - - status = ice_sbq_rw_reg_lp(&pf->hw, &cgu_msg, true); - if (status) - return -EIO; - - return 0; -} - -/** - * ice_cgu_set_gnd - Ground the refclk - * @pf: Board private structure - * @enable: True to ground the refclk - */ -int ice_cgu_set_gnd(struct ice_pf *pf, bool enable) -{ - int status = 0; - union nac_cgu_dword10 dw10; - int i; - - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val); - if (status) - goto err; - - if (enable) - dw10.field.synce_sel_gnd = 1; - else - dw10.field.synce_sel_gnd = 0; - - status = ice_cgu_reg_write(pf, NAC_CGU_DWORD10, dw10.val); - if (status) - goto err; - - for (i = 0; i < 3; i++) - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val); - if (status) - goto err; - -err: - return status; -} - -/** - * ice_cgu_set_byp - Set the DPLL bypass - * @pf: Board private structure - * @enable: True to enable bypass - */ -int ice_cgu_set_byp(struct ice_pf *pf, bool enable) -{ - union nac_cgu_dword12 dw12; - int status = 0; - - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD12, &dw12.val); - if (status) - goto err; - - if (enable) - dw12.field.synce_dpll_byp = 1; - else - dw12.field.synce_dpll_byp = 0; - - status = ice_cgu_reg_write(pf, NAC_CGU_DWORD12, dw12.val); - if (status) - goto err; - -err: - return status; -} - -/** - * ice_cgu_set_holdover_lock_irq - Set holdover/lock interrupt - * @pf: Board private structure - * @enable: True to enable the lock - */ -int ice_cgu_set_holdover_lock_irq(struct ice_pf *pf, bool enable) -{ - union nac_cgu_dword13 dw13; - int status; - - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD13, &dw13.val); - if (status) - goto err; - - /* the *_int_enb bits are defined opposite of what one would expect. 
- * 0 = enabled, 1 = disabled - */ - if (enable) { - dw13.field.synce_hdov_int_enb = 0; - dw13.field.synce_lock_int_enb = 0; - } else { - dw13.field.synce_hdov_int_enb = 1; - dw13.field.synce_lock_int_enb = 1; - } - - status = ice_cgu_reg_write(pf, NAC_CGU_DWORD13, dw13.val); - if (status) - goto err; - -err: - return status; -} - -/** - * ice_cgu_mux_sel_set_reg - Write to selected mux register - * @pf: Board private structure - * @mux_sel: Target mux - * @val: Value to write to - */ -int ice_cgu_mux_sel_set_reg(struct ice_pf *pf, enum ice_cgu_mux_sel mux_sel, u32 val) -{ - union nac_cgu_dword10 dw10; - union nac_cgu_dword11 dw11; - int status; - - switch (mux_sel) { - case ICE_CGU_MUX_SEL_REF_CLK: - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val); - if (status) - goto err; - dw10.field.synce_s_ref_clk = val; - status = ice_cgu_reg_write(pf, NAC_CGU_DWORD10, dw10.val); - if (status) - goto err; - break; - - case ICE_CGU_MUX_SEL_BYPASS_CLK: - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD11, &dw11.val); - if (status) - goto err; - dw11.field.synce_s_byp_clk = val; - status = ice_cgu_reg_write(pf, NAC_CGU_DWORD11, dw11.val); - if (status) - goto err; - break; - - case ICE_CGU_MUX_SEL_ETHCLKO: - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val); - if (status) - goto err; - dw10.field.synce_ethclko_sel = val; - status = ice_cgu_reg_write(pf, NAC_CGU_DWORD10, dw10.val); - if (status) - goto err; - break; - - case ICE_CGU_MUX_SEL_CLKO: - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val); - if (status) - goto err; - dw10.field.synce_clko_sel = val; - status = ice_cgu_reg_write(pf, NAC_CGU_DWORD10, dw10.val); - if (status) - goto err; - break; - - default: - dev_err(ice_pf_to_dev(pf), "internal error -- invalid mux!\n"); - return -EIO; - } - -err: - return status; -} - -/** - * ice_cgu_dck_rst_assert_release - Assert the dck reset - * @pf: Board private structure - * @assert: True to assert, false to release - */ -int ice_cgu_dck_rst_assert_release(struct ice_pf *pf, bool assert) -{ - union nac_cgu_dword10 dw10; - int status = 0; - int i; - - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val); - if (status) - goto err; - - if (assert) - dw10.field.synce_dck_rst = 1; - else - dw10.field.synce_dck_rst = 0; - - status = ice_cgu_reg_write(pf, NAC_CGU_DWORD10, dw10.val); - if (status) - goto err; - - for (i = 0; i < 3; i++) - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val); - if (status) - goto err; - -err: - return status; -} - -/** - * ice_cgu_dck2_rst_assert_release - Assert the dck2 reset - * @pf: Board private structure - * @assert: True to assert, false to release - */ -int ice_cgu_dck2_rst_assert_release(struct ice_pf *pf, bool assert) -{ - union nac_cgu_dword10 dw10; - int status = 0; - int i; - - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val); - if (status) - goto err; - - if (assert) - dw10.field.synce_dck2_rst = 1; - else - dw10.field.synce_dck2_rst = 0; - - status = ice_cgu_reg_write(pf, NAC_CGU_DWORD10, dw10.val); - if (status) - goto err; - - for (i = 0; i < 3; i++) - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD10, &dw10.val); - if (status) - goto err; - -err: - return status; -} - -/** - * ice_cgu_mck_rst_assert_release - Assert the mck reset - * @pf: Board private structure - * @assert: True to assert, false to release - */ -int ice_cgu_mck_rst_assert_release(struct ice_pf *pf, bool assert) -{ - union nac_cgu_dword12 dw12; - int status = 0; - int i; - - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD12, &dw12.val); - if (status) - goto err; 
- - if (assert) - dw12.field.synce_mck_rst = 1; - else - dw12.field.synce_mck_rst = 0; - - status = ice_cgu_reg_write(pf, NAC_CGU_DWORD12, dw12.val); - if (status) - goto err; - - for (i = 0; i < 3; i++) - status = ice_cgu_reg_read(pf, NAC_CGU_DWORD12, &dw12.val); - if (status) - goto err; - -err: - return status; -} - -/** - * ice_cgu_usleep - Sleep for a specified period of time - * @usec: Time to sleep in microseconds - */ -void ice_cgu_usleep(u64 usec) -{ - if (usec <= 10) { - udelay(usec); - } else if (usec <= 20000) { - usleep_range(usec, usec + 10); - } else { - int msec; - - msec = (usec + 999) / 1000; - msleep_interruptible(msec); - } -} - -/** - * ice_cgu_poll - Poll the specified CGU register for the specified value - * @pf: Board private structure - * @offset: Offset of the register - * @mask: Bitmask for testing the value - * @value: Value to poll for - * @delay_time: Delay between the register reads - * @delay_loops: Number of read loops - */ -int ice_cgu_poll(struct ice_pf *pf, u64 offset, u32 mask, u32 value, u32 delay_time, - u32 delay_loops) -{ - int status; - u32 reg, i; - - for (i = 0; i < delay_loops; i++) { - status = ice_cgu_reg_read(pf, offset, ®); - if (status) - goto err; - - if ((reg & mask) == value) - return 0; - - /* delay for a bit */ - ice_cgu_usleep(delay_time); - } - - return -EBUSY; - -err: - return status; -} - -/** - * ice_cgu_npoll - Poll the specified CGU register for the specified value occurring n times - * @pf: Board private structure - * @offset: Offset of the register - * @mask: Bitmask for testing the value - * @value: Value to poll for - * @delay_time: Delay between the register reads - * @delay_loops: Number of read loops - * @poll_count: Number of the value matches to poll for - * @count_delay_time: Additional delay after the value match - */ -int ice_cgu_npoll(struct ice_pf *pf, u32 offset, u32 mask, u32 value, u32 delay_time, - u32 delay_loops, u32 poll_count, u32 count_delay_time) -{ - u32 reg, i, my_count = 0, complete = 0; - int status; - - for (i = 0; i < delay_loops; i++) { - status = ice_cgu_reg_read(pf, offset, ®); - if (status) - goto err; - - dev_dbg(ice_pf_to_dev(pf), "count=%u, reg=%08x\n", my_count, reg); - - if ((reg & mask) == value) { - my_count++; - if (my_count < poll_count) { - ice_cgu_usleep(count_delay_time); - } else { - complete = 1; - break; - } - } else { - my_count = 0; - ice_cgu_usleep(delay_time); - } - } - - if (complete) - return 0; - else - return -EBUSY; - -err: - return status; -} - -struct ice_cgu_dpll_params dpll_params_table[ICE_NUM_DPLL_PARAMS] = { - /* {dpll select, sample rate, mul_rat_m1, scale, gain} */ - { ICE_CGU_DPLL_SELECT_TRANSPORT, ICE_CGU_SAMPLE_RATE_8K, 3124, 16, 42 }, - { ICE_CGU_DPLL_SELECT_EEC_RELAXED_BW, ICE_CGU_SAMPLE_RATE_8K, 3124, 7, 3 }, - { ICE_CGU_DPLL_SELECT_TRANSPORT, ICE_CGU_SAMPLE_RATE_10K, 2499, 20, 66 }, - { ICE_CGU_DPLL_SELECT_EEC_RELAXED_BW, ICE_CGU_SAMPLE_RATE_10K, 2499, 8, 4 }, - { ICE_CGU_DPLL_SELECT_TRANSPORT, ICE_CGU_SAMPLE_RATE_12K5, 1999, 25, 103 }, - { ICE_CGU_DPLL_SELECT_EEC_RELAXED_BW, ICE_CGU_SAMPLE_RATE_12K5, 1999, 10, 6 } -}; - -struct ice_cgu_dpll_per_rate_params dpll_per_rate_params[NUM_ICE_TIME_REF_FREQ] = { - /* {rate_hz, sample_rate, div_rat_m1, synce_rat_sel} */ - { 25000000, ICE_CGU_SAMPLE_RATE_10K, 2499, 0 }, /* 25 MHz */ - { 122880000, ICE_CGU_SAMPLE_RATE_8K, 3071, 1 }, /* 122.88 MHz */ - { 125000000, ICE_CGU_SAMPLE_RATE_10K, 2499, 1 }, /* 125 MHz */ - { 153600000, ICE_CGU_SAMPLE_RATE_10K, 3071, 1 }, /* 153.6 MHz */ - { 156250000, 
ICE_CGU_SAMPLE_RATE_10K, 3124, 1 }, /* 156.25 MHz */ -}; - -struct ice_cgu_lcpll_per_rate_params tspll_per_rate_params[NUM_ICE_TIME_REF_FREQ] = { - /* {refclk_pre_div, feedback_div, frac_n_div, post_pll_div} */ - { 1, 197, 2621440, 6 }, /* 25 MHz */ - { 5, 223, 524288, 7 }, /* 122.88 MHz */ - { 5, 223, 524288, 7 }, /* 125 MHz */ - { 5, 159, 1572864, 6 }, /* 153.6 MHz */ - { 5, 159, 1572864, 6 }, /* 156.25 MHz */ - { 10, 223, 524288, 7 }, /* 245.76 MHz */ -}; - -struct ice_cgu_lcpll_per_rate_params japll_per_rate_params[NUM_ICE_CGU_JAPLL_REF_FREQ] = { - /* {refclk_pre_div, feedback_div, frac_n_div, post_pll_div} */ - { 1, 150, 0, 6 }, /* 25 MHz */ - { 1, 120, 0, 6 }, /* 156.25 MHz */ -}; diff --git a/drivers/thirdparty/ice/ice_cgu_util.h b/drivers/thirdparty/ice/ice_cgu_util.h deleted file mode 100644 index 6a1566324cb1..000000000000 --- a/drivers/thirdparty/ice/ice_cgu_util.h +++ /dev/null @@ -1,46 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (C) 2018-2021, Intel Corporation. */ - -#ifndef _ICE_CGU_UTIL_H_ -#define _ICE_CGU_UTIL_H_ - -/* offset of last valid CGU register */ -#define ICE_CGU_MAX_REG_OFFS 0x47c - -int ice_cgu_reg_read(struct ice_pf *pf, u32 reg, u32 *val); - -int ice_cgu_reg_write(struct ice_pf *pf, u32 reg, u32 val); - -int ice_cgu_set_gnd(struct ice_pf *pf, bool enable); - -int ice_cgu_set_byp(struct ice_pf *pf, bool enable); - -int ice_cgu_set_holdover_lock_irq(struct ice_pf *pf, bool enable); - -int ice_cgu_mux_sel_set_reg(struct ice_pf *pf, enum ice_cgu_mux_sel mux_sel, u32 val); - -int ice_cgu_dck_rst_assert_release(struct ice_pf *pf, bool assert); - -int ice_cgu_dck2_rst_assert_release(struct ice_pf *pf, bool assert); - -int ice_cgu_mck_rst_assert_release(struct ice_pf *pf, bool assert); - -void ice_cgu_usleep(u64 usec); - -int ice_cgu_poll(struct ice_pf *pf, u64 offset, u32 mask, u32 value, u32 delay_time, - u32 delay_loops); - -int ice_cgu_npoll(struct ice_pf *pf, u32 offset, u32 mask, u32 value, u32 delay_time, - u32 delay_loops, u32 poll_count, u32 count_delay_time); - -#define ICE_NUM_DPLL_PARAMS (NUM_ICE_CGU_SAMPLE_RATE * NUM_ICE_CGU_DPLL_SELECT) - -extern struct ice_cgu_dpll_params dpll_params_table[ICE_NUM_DPLL_PARAMS]; - -extern struct ice_cgu_dpll_per_rate_params dpll_per_rate_params[NUM_ICE_TIME_REF_FREQ]; - -extern struct ice_cgu_lcpll_per_rate_params tspll_per_rate_params[NUM_ICE_TIME_REF_FREQ]; - -extern struct ice_cgu_lcpll_per_rate_params japll_per_rate_params[NUM_ICE_CGU_JAPLL_REF_FREQ]; - -#endif /* _ICE_CGU_UTIL_H_ */ diff --git a/drivers/thirdparty/ice/ice_common.c b/drivers/thirdparty/ice/ice_common.c index 32335405e484..83d8209af163 100644 --- a/drivers/thirdparty/ice/ice_common.c +++ b/drivers/thirdparty/ice/ice_common.c @@ -11,6 +11,111 @@ #define ICE_PF_RESET_WAIT_COUNT 300 #define ICE_SCHED_VALID_SEC_BITS 4 +static const char * const ice_link_mode_str_low[] = { + [0] = "100BASE_TX", + [1] = "100M_SGMII", + [2] = "1000BASE_T", + [3] = "1000BASE_SX", + [4] = "1000BASE_LX", + [5] = "1000BASE_KX", + [6] = "1G_SGMII", + [7] = "2500BASE_T", + [8] = "2500BASE_X", + [9] = "2500BASE_KX", + [10] = "5GBASE_T", + [11] = "5GBASE_KR", + [12] = "10GBASE_T", + [13] = "10G_SFI_DA", + [14] = "10GBASE_SR", + [15] = "10GBASE_LR", + [16] = "10GBASE_KR_CR1", + [17] = "10G_SFI_AOC_ACC", + [18] = "10G_SFI_C2C", + [19] = "25GBASE_T", + [20] = "25GBASE_CR", + [21] = "25GBASE_CR_S", + [22] = "25GBASE_CR1", + [23] = "25GBASE_SR", + [24] = "25GBASE_LR", + [25] = "25GBASE_KR", + [26] = "25GBASE_KR_S", + [27] = "25GBASE_KR1", + [28] = "25G_AUI_AOC_ACC", + 
[29] = "25G_AUI_C2C", + [30] = "40GBASE_CR4", + [31] = "40GBASE_SR4", + [32] = "40GBASE_LR4", + [33] = "40GBASE_KR4", + [34] = "40G_XLAUI_AOC_ACC", + [35] = "40G_XLAUI", + [36] = "50GBASE_CR2", + [37] = "50GBASE_SR2", + [38] = "50GBASE_LR2", + [39] = "50GBASE_KR2", + [40] = "50G_LAUI2_AOC_ACC", + [41] = "50G_LAUI2", + [42] = "50G_AUI2_AOC_ACC", + [43] = "50G_AUI2", + [44] = "50GBASE_CP", + [45] = "50GBASE_SR", + [46] = "50GBASE_FR", + [47] = "50GBASE_LR", + [48] = "50GBASE_KR_PAM4", + [49] = "50G_AUI1_AOC_ACC", + [50] = "50G_AUI1", + [51] = "100GBASE_CR4", + [52] = "100GBASE_SR4", + [53] = "100GBASE_LR4", + [54] = "100GBASE_KR4", + [55] = "100G_CAUI4_AOC_ACC", + [56] = "100G_CAUI4", + [57] = "100G_AUI4_AOC_ACC", + [58] = "100G_AUI4", + [59] = "100GBASE_CR_PAM4", + [60] = "100GBASE_KR_PAM4", + [61] = "100GBASE_CP2", + [62] = "100GBASE_SR2", + [63] = "100GBASE_DR", +}; + +static const char * const ice_link_mode_str_high[] = { + [0] = "100GBASE_KR2_PAM4", + [1] = "100G_CAUI2_AOC_ACC", + [2] = "100G_CAUI2", + [3] = "100G_AUI2_AOC_ACC", + [4] = "100G_AUI2", +}; + +/** + * ice_dump_phy_type - helper function to dump phy_type + * @hw: pointer to the HW structure + * @low: 64 bit value for phy_type_low + * @high: 64 bit value for phy_type_high + * @prefix: prefix string to differentiate multiple dumps + */ +static void +ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix) +{ + u32 i; + + ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, + (unsigned long long)low); + + for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_low); i++) { + if (low & BIT_ULL(i)) + ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", + prefix, i, ice_link_mode_str_low[i]); + } + + ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, + (unsigned long long)high); + + for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_high); i++) { + if (high & BIT_ULL(i)) + ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", + prefix, i, ice_link_mode_str_high[i]); + } +} /** * ice_set_mac_type - Sets MAC type @@ -19,10 +124,10 @@ * This function sets the MAC type of the adapter based on the * vendor ID and device ID stored in the HW structure. */ -static enum ice_status ice_set_mac_type(struct ice_hw *hw) +static int ice_set_mac_type(struct ice_hw *hw) { if (hw->vendor_id != PCI_VENDOR_ID_INTEL) - return ICE_ERR_DEVICE_NOT_SUPPORTED; + return -ENODEV; switch (hw->device_id) { case ICE_DEV_ID_E810C_BACKPLANE: @@ -54,6 +159,12 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw) case ICE_DEV_ID_E823C_SGMII: hw->mac_type = ICE_MAC_GENERIC; break; + case ICE_DEV_ID_E825C_BACKPLANE: + case ICE_DEV_ID_E825C_QSFP: + case ICE_DEV_ID_E825C_SFP: + case ICE_DEV_ID_E825C_SGMII: + hw->mac_type = ICE_MAC_GENERIC; + break; default: hw->mac_type = ICE_MAC_UNKNOWN; break; @@ -71,7 +182,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw) */ bool ice_is_generic_mac(struct ice_hw *hw) { - return hw->mac_type == ICE_MAC_GENERIC; + return (hw->mac_type == ICE_MAC_GENERIC || + hw->mac_type == ICE_MAC_GENERIC_3K); } /** @@ -85,6 +197,65 @@ bool ice_is_e810(struct ice_hw *hw) return hw->mac_type == ICE_MAC_E810; } +/** + * ice_is_e810t + * @hw: pointer to the hardware structure + * + * returns true if the device is E810T based, false if not. 
+ */ +bool ice_is_e810t(struct ice_hw *hw) +{ + switch (hw->device_id) { + case ICE_DEV_ID_E810C_SFP: + switch (hw->subsystem_device_id) { + case ICE_SUBDEV_ID_E810T: + case ICE_SUBDEV_ID_E810T2: + case ICE_SUBDEV_ID_E810T3: + case ICE_SUBDEV_ID_E810T4: + case ICE_SUBDEV_ID_E810T5: + case ICE_SUBDEV_ID_E810T7: + return true; + } + break; + case ICE_DEV_ID_E810C_QSFP: + switch (hw->subsystem_device_id) { + case ICE_SUBDEV_ID_E810T2: + case ICE_SUBDEV_ID_E810T5: + case ICE_SUBDEV_ID_E810T6: + return true; + } + break; + default: + break; + } + + return false; +} + +/** + * ice_is_e823 + * @hw: pointer to the hardware structure + * + * returns true if the device is E823-L or E823-C based, false if not. + */ +bool ice_is_e823(struct ice_hw *hw) +{ + switch (hw->device_id) { + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_SGMII: + return true; + default: + return false; + } +} /** * ice_clear_pf_cfg - Clear PF configuration @@ -93,7 +264,7 @@ bool ice_is_e810(struct ice_hw *hw) * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port * configuration, flow director filters, etc.). */ -enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) +int ice_clear_pf_cfg(struct ice_hw *hw) { struct ice_aq_desc desc; @@ -117,21 +288,21 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) * ice_discover_dev_caps is expected to be called before this function is * called. */ -static enum ice_status +static int ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_manage_mac_read_resp *resp; struct ice_aqc_manage_mac_read *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; u16 flags; u8 i; cmd = &desc.params.mac_read; if (buf_size < sizeof(*resp)) - return ICE_ERR_BUF_TOO_SHORT; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read); @@ -144,7 +315,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) { ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); - return ICE_ERR_CFG; + return -EIO; } /* A single port can report up to two (LAN and WoL) addresses */ @@ -169,7 +340,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, * * Returns the various PHY capabilities supported on the Port (0x0600) */ -enum ice_status +int ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *pcaps, struct ice_sq_cd *cd) @@ -177,18 +348,19 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps *cmd; u16 pcaps_size = sizeof(*pcaps); struct ice_aq_desc desc; - enum ice_status status; + const char *prefix; struct ice_hw *hw; + int status; cmd = &desc.params.get_phy; if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; if (report_mode == ICE_AQC_REPORT_DFLT_CFG && !ice_fw_supports_report_dflt_cfg(hw)) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); @@ -196,31 +368,51 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM); cmd->param0 |= cpu_to_le16(report_mode); + status 
= ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd); - ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n", - report_mode); - ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", - (unsigned long long)le64_to_cpu(pcaps->phy_type_low)); - ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", - (unsigned long long)le64_to_cpu(pcaps->phy_type_high)); - ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps); - ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", + ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n"); + + switch (report_mode) { + case ICE_AQC_REPORT_TOPO_CAP_MEDIA: + prefix = "phy_caps_media"; + break; + case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA: + prefix = "phy_caps_no_media"; + break; + case ICE_AQC_REPORT_ACTIVE_CFG: + prefix = "phy_caps_active"; + break; + case ICE_AQC_REPORT_DFLT_CFG: + prefix = "phy_caps_default"; + break; + default: + prefix = "phy_caps_invalid"; + } + + ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low), + le64_to_cpu(pcaps->phy_type_high), prefix); + + ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n", + prefix, report_mode); + ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps); + ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix, pcaps->low_power_ctrl_an); - ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap); - ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", + ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix, + pcaps->eee_cap); + ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix, pcaps->eeer_value); - ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n", + ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix, pcaps->link_fec_options); - ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n", - pcaps->module_compliance_enforcement); - ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n", - pcaps->extended_compliance_code); - ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n", + ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n", + prefix, pcaps->module_compliance_enforcement); + ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n", + prefix, pcaps->extended_compliance_code); + ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix, pcaps->module_type[0]); - ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n", + ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix, pcaps->module_type[1]); - ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n", + ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix, pcaps->module_type[2]); if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) { @@ -234,20 +426,113 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, } /** - * ice_aq_get_link_topo_handle - get link topology node return status - * @pi: port information structure - * @node_type: requested node type - * @cd: pointer to command details structure or NULL - * - * Get link topology node return status for specified node type (0x06E0) - * - * Node type cage can be used to determine if cage is present. If AQC - * returns error (ENOENT), then no cage present. If no cage present, then - * connection type is backplane or BASE-T. 
+ * ice_aq_get_netlist_node_pin + * @hw: pointer to the hw struct + * @cmd: get_link_topo_pin AQ structure + * @node_handle: output node handle parameter if node found */ -static enum ice_status -ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, - struct ice_sq_cd *cd) +int +ice_aq_get_netlist_node_pin(struct ice_hw *hw, + struct ice_aqc_get_link_topo_pin *cmd, + u16 *node_handle) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo_pin); + desc.params.get_link_topo_pin = *cmd; + + if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL)) + return -EOPNOTSUPP; + + if (node_handle) + *node_handle = + le16_to_cpu(desc.params.get_link_topo_pin.addr.handle); + + return 0; +} + +/** + * ice_aq_get_netlist_node + * @hw: pointer to the hw struct + * @cmd: get_link_topo AQ structure + * @node_part_number: output node part number if node found + * @node_handle: output node handle parameter if node found + */ +int +ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, + u8 *node_part_number, u16 *node_handle) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + desc.params.get_link_topo = *cmd; + + if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL)) + return -EOPNOTSUPP; + + if (node_handle) + *node_handle = + le16_to_cpu(desc.params.get_link_topo.addr.handle); + if (node_part_number) + *node_part_number = desc.params.get_link_topo.node_part_num; + + return 0; +} + +#define MAX_NETLIST_SIZE 10 +/** + * ice_find_netlist_node + * @hw: pointer to the hw struct + * @node_type_ctx: type of netlist node to look for + * @node_part_number: node part number to look for + * @node_handle: output parameter if node found - optional + * + * Find and return the node handle for a given node type and part number in the + * netlist. When found 0 is returned, -ENOENT otherwise. If + * node_handle provided, it would be set to found node handle. + */ +int +ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number, + u16 *node_handle) +{ + struct ice_aqc_get_link_topo cmd; + u8 rec_node_part_number; + u16 rec_node_handle; + u8 idx; + + for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) { + int status; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.addr.topo_params.node_type_ctx = + (node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S); + cmd.addr.topo_params.index = idx; + + status = ice_aq_get_netlist_node(hw, &cmd, + &rec_node_part_number, + &rec_node_handle); + if (status) + return status; + + if (rec_node_part_number == node_part_number) { + if (node_handle) + *node_handle = rec_node_handle; + return 0; + } + } + + return -ENOENT; +} + +/** + * ice_is_media_cage_present + * @pi: port information structure + * + * Returns true if media cage is present, else false. If no cage, then + * media type is backplane or BASE-T. + */ +static bool ice_is_media_cage_present(struct ice_port_info *pi) { struct ice_aqc_get_link_topo *cmd; struct ice_aq_desc desc; @@ -262,27 +547,14 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, /* set node type */ cmd->addr.topo_params.node_type_ctx |= - (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); + (ICE_AQC_LINK_TOPO_NODE_TYPE_M & + ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE); - return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); -} - -/** - * ice_is_media_cage_present - * @pi: port information structure - * - * Returns true if media cage is present, else false. If no cage, then - * media type is backplane or BASE-T. 
- */ -static bool ice_is_media_cage_present(struct ice_port_info *pi) -{ /* Node type cage can be used to determine if cage is present. If AQC * returns error (ENOENT), then no cage present. If no cage present then * connection type is backplane or BASE-T. */ - return !ice_aq_get_link_topo_handle(pi, - ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE, - NULL); + return ice_aq_get_netlist_node(pi->hw, cmd, NULL, NULL); } /** @@ -319,7 +591,6 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) case ICE_PHY_TYPE_LOW_1000BASE_LX: case ICE_PHY_TYPE_LOW_10GBASE_SR: case ICE_PHY_TYPE_LOW_10GBASE_LR: - case ICE_PHY_TYPE_LOW_10G_SFI_C2C: case ICE_PHY_TYPE_LOW_25GBASE_SR: case ICE_PHY_TYPE_LOW_25GBASE_LR: case ICE_PHY_TYPE_LOW_40GBASE_SR4: @@ -370,12 +641,13 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) case ICE_PHY_TYPE_LOW_100G_CAUI4: if (ice_is_media_cage_present(pi)) return ICE_MEDIA_AUI; - /* fall-through */ + fallthrough; case ICE_PHY_TYPE_LOW_1000BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_X: case ICE_PHY_TYPE_LOW_5GBASE_KR: case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: case ICE_PHY_TYPE_LOW_25GBASE_KR: case ICE_PHY_TYPE_LOW_25GBASE_KR1: case ICE_PHY_TYPE_LOW_25GBASE_KR_S: @@ -392,7 +664,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) case ICE_PHY_TYPE_HIGH_100G_CAUI2: if (ice_is_media_cage_present(pi)) return ICE_MEDIA_AUI; - /* fall-through */ + fallthrough; case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: return ICE_MEDIA_BACKPLANE; case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: @@ -403,6 +675,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) return ICE_MEDIA_UNKNOWN; } +#define ice_get_link_status_datalen(hw) ICE_GET_LINK_STATUS_DATALEN_V1 /** * ice_aq_get_link_info @@ -413,7 +686,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) * * Get Link Status (0x607). Returns the link status of the adapter. */ -enum ice_status +int ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd) { @@ -424,15 +697,14 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_fc_info *hw_fc_info; bool tx_pause, rx_pause; struct ice_aq_desc desc; - enum ice_status status; struct ice_hw *hw; u16 cmd_flags; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; - li_old = &pi->phy.link_info_old; hw_media_type = &pi->phy.media_type; li = &pi->phy.link_info; @@ -444,8 +716,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, resp->cmd_flags = cpu_to_le16(cmd_flags); resp->lport_num = pi->lport; - status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd); - + status = ice_aq_send_cmd(hw, &desc, &link_data, + ice_get_link_status_datalen(hw), cd); if (status) return status; @@ -527,7 +799,7 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, * LFC. Thus, we will use index = * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX. 
* - * Also, because we are opearating on transmit timer and fc + * Also, because we are operating on transmit timer and fc * threshold of LFC, we don't turn on any bit in tx_tmr_priority */ #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX @@ -549,12 +821,14 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, * ice_aq_set_mac_cfg * @hw: pointer to the HW struct * @max_frame_size: Maximum Frame Size to be supported + * @auto_drop: Tell HW to drop packets if TC queue is blocked * @cd: pointer to command details structure or NULL * * Set MAC configuration (0x0603) */ -enum ice_status -ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) +int +ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop, + struct ice_sq_cd *cd) { struct ice_aqc_set_mac_cfg *cmd; struct ice_aq_desc desc; @@ -562,12 +836,14 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) cmd = &desc.params.set_mac_cfg; if (max_frame_size == 0) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg); cmd->max_frame_size = cpu_to_le16(max_frame_size); + if (ice_is_fw_auto_drop_supported(hw) && auto_drop) + cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS; ice_fill_tx_timer_and_fc_thresh(hw, cmd); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); @@ -577,10 +853,10 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) * ice_init_fltr_mgmt_struct - initializes filter management list and locks * @hw: pointer to the HW struct */ -static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) +int ice_init_fltr_mgmt_struct(struct ice_hw *hw) { struct ice_switch_info *sw; - enum ice_status status; + int status; hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*hw->switch_info), GFP_KERNEL); @@ -588,7 +864,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) sw = hw->switch_info; if (!sw) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; INIT_LIST_HEAD(&sw->vsi_list_map_head); sw->prof_res_bm_init = 0; @@ -668,12 +944,11 @@ ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw) * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks * @hw: pointer to the HW struct */ -static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) +void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) { ice_cleanup_fltr_mgmt_single(hw, hw->switch_info); } - /** * ice_get_itr_intrl_gran * @hw: pointer to the HW struct @@ -722,7 +997,6 @@ void ice_print_rollback_msg(struct ice_hw *hw) nvm_str, hw->fw_maj_ver, hw->fw_min_ver); } - /** * ice_set_umac_shared * @hw: pointer to the hw struct @@ -738,12 +1012,12 @@ void ice_set_umac_shared(struct ice_hw *hw) * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure */ -enum ice_status ice_init_hw(struct ice_hw *hw) +int ice_init_hw(struct ice_hw *hw) { struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; u16 mac_buf_len; void *mac_buf; + int status; /* Set MAC type based on DeviceID */ status = ice_set_mac_type(hw); @@ -754,13 +1028,11 @@ enum ice_status ice_init_hw(struct ice_hw *hw) PF_FUNC_RID_FUNCTION_NUMBER_M) >> PF_FUNC_RID_FUNCTION_NUMBER_S; - status = ice_reset(hw, ICE_RESET_PFR); if (status) return status; ice_get_itr_intrl_gran(hw); - status = ice_create_all_ctrlq(hw); if (status) goto err_unroll_cqinit; @@ -808,7 +1080,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) hw->port_info = devm_kzalloc(ice_hw_to_dev(hw), 
sizeof(*hw->port_info), GFP_KERNEL); if (!hw->port_info) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll_cqinit; } @@ -835,7 +1107,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) goto err_unroll_sched; pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll_sched; } @@ -855,7 +1127,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) /* need a valid SW entry point to build a Tx tree */ if (!hw->sw_entry_point_layer) { ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); - status = ICE_ERR_CFG; + status = -EIO; goto err_unroll_sched; } INIT_LIST_HEAD(&hw->agg_list); @@ -866,7 +1138,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_sched; - /* Get MAC information */ /* A single port can report up to two (LAN and WoL) addresses */ @@ -876,7 +1147,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); if (!mac_buf) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll_fltr_mgmt_struct; } @@ -885,10 +1156,18 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_fltr_mgmt_struct; + + /* enable jumbo frame support at MAC level */ + status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false, + NULL); + if (status) + goto err_unroll_fltr_mgmt_struct; + /* Obtain counter base index which would be used by flow director */ status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base); if (status) goto err_unroll_fltr_mgmt_struct; + status = ice_init_hw_tbls(hw); if (status) goto err_unroll_fltr_mgmt_struct; @@ -942,7 +1221,7 @@ void ice_deinit_hw(struct ice_hw *hw) * ice_check_reset - Check to see if a global reset is complete * @hw: pointer to the hardware structure */ -enum ice_status ice_check_reset(struct ice_hw *hw) +int ice_check_reset(struct ice_hw *hw) { u32 cnt, reg = 0, grst_timeout, uld_mask; @@ -962,7 +1241,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) if (cnt == grst_timeout) { ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); - return ICE_ERR_RESET_FAILED; + return -EIO; } #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ @@ -989,7 +1268,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) if (cnt == ICE_PF_RESET_WAIT_COUNT) { ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n", reg); - return ICE_ERR_RESET_FAILED; + return -EIO; } return 0; @@ -1002,7 +1281,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) * If a global reset has been triggered, this function checks * for its completion and then issues the PF reset */ -static enum ice_status ice_pf_reset(struct ice_hw *hw) +static int ice_pf_reset(struct ice_hw *hw) { u32 cnt, reg; @@ -1015,7 +1294,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { /* poll on global reset currently in progress until done */ if (ice_check_reset(hw)) - return ICE_ERR_RESET_FAILED; + return -EIO; return 0; } @@ -1030,7 +1309,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) * that is occurring during a download package operation. 
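The reset paths above (ice_check_reset(), ice_pf_reset()) all follow the same bounded register-poll pattern: read a status register in a loop, stop as soon as the done bit is set, and give up after a fixed number of attempts. A stand-alone sketch of that pattern with a stubbed register read; the names and the 1 ms poll interval are illustrative, not driver values:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define DONE_BIT	0x1u
#define MAX_POLLS	100

/* Stand-in for a hardware register read; reports "done" after a few polls. */
static unsigned int read_status_reg(void)
{
	static int calls;

	return ++calls >= 5 ? DONE_BIT : 0;
}

/* Bounded poll: succeed as soon as the done bit appears, fail after
 * MAX_POLLS attempts, sleeping between reads.
 */
static bool wait_for_done(void)
{
	for (int i = 0; i < MAX_POLLS; i++) {
		if (read_status_reg() & DONE_BIT)
			return true;
		usleep(1000);
	}
	return false;
}

int main(void)
{
	printf("reset %s\n", wait_for_done() ? "completed" : "timed out");
	return 0;
}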
*/ for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT + - ICE_PF_RESET_WAIT_COUNT; cnt++) { + ICE_PF_RESET_WAIT_COUNT; cnt++) { reg = rd32(hw, PFGEN_CTRL); if (!(reg & PFGEN_CTRL_PFSWR_M)) break; @@ -1040,7 +1319,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) if (cnt == ICE_PF_RESET_WAIT_COUNT) { ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n"); - return ICE_ERR_RESET_FAILED; + return -EIO; } return 0; @@ -1058,7 +1337,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) * This has to be cleared using ice_clear_pxe_mode again, once the AQ * interface has been restored in the rebuild flow. */ -enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) +int ice_reset(struct ice_hw *hw, enum ice_reset_req req) { u32 val = 0; @@ -1074,19 +1353,17 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) val = GLGEN_RTRIG_GLOBR_M; break; default: - return ICE_ERR_PARAM; + return -EINVAL; } val |= rd32(hw, GLGEN_RTRIG); wr32(hw, GLGEN_RTRIG, val); ice_flush(hw); - /* wait for the FW to be ready */ return ice_check_reset(hw); } - /** * ice_copy_rxq_ctx_to_hw * @hw: pointer to the hardware structure @@ -1095,16 +1372,16 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) * * Copies rxq context from dense structure to HW register space */ -static enum ice_status +static int ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) { u8 i; if (!ice_rxq_ctx) - return ICE_ERR_BAD_PTR; + return -EINVAL; if (rxq_index > QRX_CTRL_MAX_INDEX) - return ICE_ERR_PARAM; + return -EINVAL; /* Copy each dword separately to HW */ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { @@ -1154,14 +1431,14 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { * it to HW register space and enables the hardware to prefetch descriptors * instead of only fetching them on demand */ -enum ice_status +int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index) { u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; if (!rlan_ctx) - return ICE_ERR_BAD_PTR; + return -EINVAL; rlan_ctx->prefena = 1; @@ -1176,12 +1453,12 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, * * Clears rxq context in HW register space */ -enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index) +int ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index) { u8 i; if (rxq_index > QRX_CTRL_MAX_INDEX) - return ICE_ERR_PARAM; + return -EINVAL; /* Clear each dword register separately */ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) @@ -1232,17 +1509,17 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { * * Copies Tx completion queue context from dense structure to HW register space */ -static enum ice_status +static int ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx, u32 tx_cmpltnq_index) { u8 i; if (!ice_tx_cmpltnq_ctx) - return ICE_ERR_BAD_PTR; + return -EINVAL; if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX) - return ICE_ERR_PARAM; + return -EINVAL; /* Copy each dword separately to HW */ for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) { @@ -1281,7 +1558,7 @@ static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = { * Converts completion queue context from sparse to dense structure and then * writes it to HW register space */ -enum ice_status +int ice_write_tx_cmpltnq_ctx(struct ice_hw *hw, struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx, u32 tx_cmpltnq_index) @@ -1299,13 +1576,13 @@ ice_write_tx_cmpltnq_ctx(struct ice_hw *hw, * * Clears Tx completion queue context in HW register space */ 
-enum ice_status +int ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index) { u8 i; if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX) - return ICE_ERR_PARAM; + return -EINVAL; /* Clear each dword register separately */ for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) @@ -1322,17 +1599,17 @@ ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index) * * Copies doorbell queue context from dense structure to HW register space */ -static enum ice_status +static int ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx, u32 tx_drbell_q_index) { u8 i; if (!ice_tx_drbell_q_ctx) - return ICE_ERR_BAD_PTR; + return -EINVAL; if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX) - return ICE_ERR_PARAM; + return -EINVAL; /* Copy each dword separately to HW */ for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) { @@ -1372,7 +1649,7 @@ static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = { * Converts doorbell queue context from sparse to dense structure and then * writes it to HW register space */ -enum ice_status +int ice_write_tx_drbell_q_ctx(struct ice_hw *hw, struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx, u32 tx_drbell_q_index) @@ -1391,13 +1668,13 @@ ice_write_tx_drbell_q_ctx(struct ice_hw *hw, * * Clears doorbell queue context in HW register space */ -enum ice_status +int ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index) { u8 i; if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX) - return ICE_ERR_PARAM; + return -EINVAL; /* Clear each dword register separately */ for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) @@ -1427,7 +1704,7 @@ static struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw) * @buf_size: size of buffer for indirect commands (0 for direct commands) * @cd: pointer to command details structure */ -static enum ice_status +static int ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { @@ -1444,7 +1721,7 @@ ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, * @buf_size: size of buffer for indirect commands (0 for direct commands) * @cd: pointer to command details structure */ -static enum ice_status +static int ice_sbq_send_cmd_nolock(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { @@ -1460,13 +1737,13 @@ ice_sbq_send_cmd_nolock(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, * @lock: true to lock the sq_lock (the usual case); false if the sq_lock has * already been locked at a higher level */ -enum ice_status ice_sbq_rw_reg_lp(struct ice_hw *hw, +int ice_sbq_rw_reg_lp(struct ice_hw *hw, struct ice_sbq_msg_input *in, bool lock) { struct ice_sbq_cmd_desc desc = {0}; struct ice_sbq_msg_req msg = {0}; - enum ice_status status; u16 msg_len; + int status; msg_len = sizeof(msg); @@ -1504,7 +1781,7 @@ enum ice_status ice_sbq_rw_reg_lp(struct ice_hw *hw, * @hw: pointer to the HW struct * @in: message info to be filled in descriptor */ -enum ice_status ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in) +int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in) { return ice_sbq_rw_reg_lp(hw, in, true); } @@ -1574,17 +1851,17 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode) * Retry sending the FW Admin Queue command, multiple times, to the FW Admin * Queue if the EBUSY AQ error is returned. 
*/ -static enum ice_status +static int ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aq_desc desc_cpy; - enum ice_status status; bool is_cmd_for_retry; u8 *buf_cpy = NULL; u8 idx = 0; u16 opcode; + int status; opcode = le16_to_cpu(desc->opcode); is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode); @@ -1595,7 +1872,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, buf_cpy = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf_cpy) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } memcpy(&desc_cpy, desc, sizeof(desc_cpy)); @@ -1633,13 +1910,13 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, * * Helper function to send FW Admin Queue commands to the FW Admin Queue. */ -enum ice_status +int ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_req_res *cmd = &desc->params.res_owner; bool lock_acquired = false; - enum ice_status status; + int status; /* When a package download is in process (i.e. when the firmware's * Global Configuration Lock resource is held), only the Download @@ -1660,6 +1937,8 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, case ice_aqc_opc_set_port_params: case ice_aqc_opc_get_vlan_mode_parameters: case ice_aqc_opc_set_vlan_mode_parameters: + case ice_aqc_opc_set_tx_topo: + case ice_aqc_opc_get_tx_topo: case ice_aqc_opc_add_recipe: case ice_aqc_opc_recipe_to_profile: case ice_aqc_opc_get_recipe: @@ -1668,7 +1947,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, case ice_aqc_opc_release_res: if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK) break; - /* fall-through */ + fallthrough; default: mutex_lock(&ice_global_cfg_lock_sw); lock_acquired = true; @@ -1689,11 +1968,11 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, * * Get the firmware version (0x0001) from the admin queue commands */ -enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) +int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) { struct ice_aqc_get_ver *resp; struct ice_aq_desc desc; - enum ice_status status; + int status; resp = &desc.params.get_ver; @@ -1724,7 +2003,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) * * Send the driver version (0x0002) to the firmware */ -enum ice_status +int ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd) { @@ -1735,7 +2014,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, cmd = &desc.params.driver_ver; if (!dv) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); @@ -1761,7 +2040,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well (0x0003). */ -enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) +int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) { struct ice_aqc_q_shutdown *cmd; struct ice_aq_desc desc; @@ -1788,9 +2067,9 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) * Requests common resource using the admin queue commands (0x0008). 
* When attempting to acquire the Global Config Lock, the driver can * learn of three states: - * 1) ICE_SUCCESS - acquired lock, and can perform download package - * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load - * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has + * 1) 0 - acquired lock, and can perform download package + * 2) -EIO - did not get lock, driver should fail to load + * 3) -EALREADY - did not get lock, but another driver has * successfully downloaded the package; the driver does * not have to download the package and can continue * loading @@ -1802,14 +2081,14 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) * will likely get an error propagated back to it indicating the Download * Package, Update Package or the Release Resource AQ commands timed out. */ -static enum ice_status +static int ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, struct ice_sq_cd *cd) { struct ice_aqc_req_res *cmd_resp; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd_resp = &desc.params.res_owner; @@ -1841,15 +2120,15 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, } else if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_IN_PROG) { *timeout = le32_to_cpu(cmd_resp->timeout); - return ICE_ERR_AQ_ERROR; + return -EIO; } else if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_DONE) { - return ICE_ERR_AQ_NO_WORK; + return -EALREADY; } /* invalid FW response, force a timeout immediately */ *timeout = 0; - return ICE_ERR_AQ_ERROR; + return -EIO; } /* If the resource is held by some other driver, the command completes @@ -1871,7 +2150,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, * * release common resource using the admin queue commands (0x0009) */ -static enum ice_status +static int ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, struct ice_sq_cd *cd) { @@ -1897,23 +2176,23 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, * * This function will attempt to acquire the ownership of a resource. */ -enum ice_status +int ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout) { #define ICE_RES_POLLING_DELAY_MS 10 u32 delay = ICE_RES_POLLING_DELAY_MS; u32 time_left = timeout; - enum ice_status status; + int status; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has + /* A return code of -EALREADY means that another driver has * previously acquired the resource and performed any necessary updates; * in this case the caller does not obtain the resource and has no * further work to do. */ - if (status == ICE_ERR_AQ_NO_WORK) + if (status == -EALREADY) goto ice_acquire_res_exit; if (status) @@ -1926,7 +2205,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, timeout = (timeout > delay) ? 
timeout - delay : 0; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - if (status == ICE_ERR_AQ_NO_WORK) + if (status == -EALREADY) /* lock free, but no work to do */ break; @@ -1934,15 +2213,15 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, /* lock acquired */ break; } - if (status && status != ICE_ERR_AQ_NO_WORK) + if (status && status != -EALREADY) ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); ice_acquire_res_exit: - if (status == ICE_ERR_AQ_NO_WORK) { + if (status == -EALREADY) { if (access == ICE_RES_WRITE) ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); else - ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); + ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); } return status; } @@ -1956,15 +2235,15 @@ ice_acquire_res_exit: */ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) { - enum ice_status status; u32 total_delay = 0; + int status; status = ice_aq_release_res(hw, res, 0, NULL); /* there are some rare cases when trying to release the resource * results in an admin queue timeout, so handle them correctly */ - while ((status == ICE_ERR_AQ_TIMEOUT) && + while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) { msleep(1); status = ice_aq_release_res(hw, res, 0, NULL); @@ -1983,7 +2262,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) * * Helper function to allocate/free resources using the admin queue commands */ -enum ice_status +int ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc, struct ice_sq_cd *cd) @@ -1994,10 +2273,10 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, cmd = &desc.params.sw_res_ctrl; if (!buf) - return ICE_ERR_PARAM; + return -EINVAL; if (buf_size < flex_array_size(buf, elem, num_entries)) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); @@ -2016,17 +2295,17 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, * @btm: allocate from bottom * @res: pointer to array that will receive the resources */ -enum ice_status +int ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(buf, elem, num); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Prepare buffer to allocate resource. */ buf->num_elems = cpu_to_le16(num); @@ -2054,16 +2333,16 @@ ice_alloc_res_exit: * @num: number of resources * @res: pointer to array that contains the resources to free */ -enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) +int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(buf, elem, num); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Prepare buffer to free resource. 
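Per the resource-request comment earlier in this hunk, acquiring the Global Config Lock has three outcomes: 0 (lock taken, perform the download), -EIO (lock never granted, fail to load) and -EALREADY (another function already downloaded the package, skip it and continue), and ice_acquire_res() above keeps re-requesting until the firmware-supplied timeout lapses. A stand-alone sketch of how a caller might act on those three outcomes; acquire_global_lock() and friends are stand-ins, not driver API:

#include <errno.h>
#include <stdio.h>

/* Stand-in for resource acquisition: 0 = lock taken, -EALREADY = another
 * function already downloaded the package, -EIO = lock not granted.
 */
static int acquire_global_lock(void)
{
	return -EALREADY;	/* pretend another PF got there first */
}

static void release_global_lock(void)
{
}

static int download_package(void)
{
	puts("downloading DDP package");
	return 0;
}

int main(void)
{
	int err = acquire_global_lock();

	if (err == -EALREADY) {
		/* Package already present: nothing to download, keep going. */
		puts("package already downloaded, continuing load");
		return 0;
	}
	if (err) {
		/* Lock never granted: treat as a fatal init error. */
		fprintf(stderr, "global config lock failed: %d\n", err);
		return 1;
	}

	err = download_package();
	release_global_lock();
	return err ? 1 : 0;
}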
*/ buf->num_elems = cpu_to_le16(num); @@ -2189,6 +2468,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, u32 number = le32_to_cpu(elem->number); u16 cap = le16_to_cpu(elem->cap); bool found = true; + u8 i; switch (cap) { case ICE_AQC_CAPS_SWITCHING_MODE: @@ -2276,18 +2556,70 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, caps->msix_vector_first_id); break; case ICE_AQC_CAPS_NVM_VER: + caps->nvm_word_address[0] = ICE_LO_WORD(number); + caps->nvm_value[0] = ICE_HI_WORD(number); + caps->nvm_word_address[1] = ICE_LO_WORD(logical_id); + caps->nvm_value[1] = ICE_HI_WORD(logical_id); + caps->nvm_word_address[2] = ICE_LO_WORD(phys_id); + caps->nvm_value[2] = ICE_HI_WORD(phys_id); + for (i = 0; i < ICE_NVM_ADDRESS_VALUE_READS; i++) { + ice_debug(hw, ICE_DBG_INIT, "%s: nvm_word_address = 0x%04X\n", + prefix, caps->nvm_word_address[i]); + ice_debug(hw, ICE_DBG_INIT, "%s: nvm_value = 0x%04X\n", + prefix, caps->nvm_value[i]); + } + break; + case ICE_AQC_CAPS_OROM_VER: + caps->orom_ver = number; + ice_debug(hw, ICE_DBG_INIT, "%s: orom_ver = 0x%08X\n", prefix, + caps->orom_ver); + break; + case ICE_AQC_CAPS_NET_VER: + caps->base_release_ver_major = number; + caps->base_release_ver_type = logical_id; + caps->base_release_ver_iana = phys_id; + ice_debug(hw, ICE_DBG_INIT, "%s: base_release_ver_major = 0x%08X\n", + prefix, caps->base_release_ver_major); + ice_debug(hw, ICE_DBG_INIT, "%s: base_release_ver_type = 0x%08X\n", + prefix, caps->base_release_ver_type); + ice_debug(hw, ICE_DBG_INIT, "%s: base_release_ver_iana = 0x%08X\n", + prefix, caps->base_release_ver_iana); break; case ICE_AQC_CAPS_PENDING_NVM_VER: caps->nvm_update_pending_nvm = true; + caps->nvm_word_address[0] = ICE_LO_WORD(number); + caps->nvm_value[0] = ICE_HI_WORD(number); + caps->nvm_word_address[1] = ICE_LO_WORD(logical_id); + caps->nvm_value[1] = ICE_HI_WORD(logical_id); + caps->nvm_word_address[2] = ICE_LO_WORD(phys_id); + caps->nvm_value[2] = ICE_HI_WORD(phys_id); ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); + for (i = 0; i < ICE_NVM_ADDRESS_VALUE_READS; i++) { + ice_debug(hw, ICE_DBG_INIT, "%s: nvm_word_address = 0x%04X\n", + prefix, caps->nvm_word_address[i]); + ice_debug(hw, ICE_DBG_INIT, "%s: nvm_value = 0x%04X\n", + prefix, caps->nvm_value[i]); + } break; case ICE_AQC_CAPS_PENDING_OROM_VER: caps->nvm_update_pending_orom = true; + caps->orom_ver = number; ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); + ice_debug(hw, ICE_DBG_INIT, "%s: orom_ver = 0x%08X\n", prefix, + caps->orom_ver); break; case ICE_AQC_CAPS_PENDING_NET_VER: caps->nvm_update_pending_netlist = true; + caps->base_release_ver_major = number; + caps->base_release_ver_type = logical_id; + caps->base_release_ver_iana = phys_id; ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); + ice_debug(hw, ICE_DBG_INIT, "%s: base_release_ver_major = 0x%08X\n", + prefix, caps->base_release_ver_major); + ice_debug(hw, ICE_DBG_INIT, "%s: base_release_ver_type = 0x%08X\n", + prefix, caps->base_release_ver_type); + ice_debug(hw, ICE_DBG_INIT, "%s: base_release_ver_iana = 0x%08X\n", + prefix, caps->base_release_ver_iana); break; case ICE_AQC_CAPS_NVM_MGMT: caps->sec_rev_disabled = @@ -2315,6 +2647,11 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, caps->iwarp = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp); break; + case ICE_AQC_CAPS_ROCEV2_LAG: + caps->roce_lag = (number == 1); + ice_debug(hw, ICE_DBG_INIT, 
"%s: roce_lag = %d\n", + prefix, caps->roce_lag); + break; case ICE_AQC_CAPS_LED: if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) { caps->led[phys_id] = true; @@ -2346,21 +2683,32 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, caps->num_wol_proxy_fltr); ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix, caps->wol_proxy_vsi_seid); + ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %d\n", + prefix, caps->apm_wol_support); break; case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", prefix, caps->max_mtu); break; + case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: + caps->pcie_reset_avoidance = (number > 0); + ice_debug(hw, ICE_DBG_INIT, + "%s: pcie_reset_avoidance = %d\n", prefix, + caps->pcie_reset_avoidance); + break; + case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: + caps->reset_restrict_support = (number == 1); + ice_debug(hw, ICE_DBG_INIT, + "%s: reset_restrict_support = %d\n", prefix, + caps->reset_restrict_support); + break; case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0: case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1: case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2: case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3: { - u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0; - - if (index >= ICE_EXT_TOPO_DEV_IMG_COUNT) - break; + u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0); caps->ext_topo_dev_img_ver_high[index] = number; caps->ext_topo_dev_img_ver_low[index] = logical_id; @@ -2393,6 +2741,14 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, caps->ext_topo_dev_img_prog_en[index]); break; } + case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: + caps->tx_sched_topo_comp_mode_en = (number == 1); + break; + case ICE_AQC_CAPS_DYN_FLATTENING: + caps->dyn_flattening_en = (number == 1); + ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n", + prefix, caps->dyn_flattening_en); + break; default: /* Not one of the recognized common capabilities */ found = false; @@ -2491,6 +2847,9 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, { struct ice_ts_func_info *info = &func_p->ts_func_info; u32 number = le32_to_cpu(cap->number); + u8 clk_freq; + + ice_debug(hw, ICE_DBG_INIT, "1588 func caps: raw value %x\n", number); info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); func_p->common_cap.ieee_1588 = info->ena; @@ -2500,18 +2859,17 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); - info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); - - if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { - info->time_ref = (enum ice_time_ref_freq)info->clk_freq; + clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; + if (clk_freq < NUM_ICE_TIME_REF_FREQ) { + info->time_ref = (enum ice_time_ref_freq)clk_freq; } else { /* Unknown clock frequency, so assume a (probably incorrect) * default to avoid out-of-bounds look ups of frequency * related information. 
*/ ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n", - info->clk_freq); + clk_freq); info->time_ref = ICE_TIME_REF_FREQ_25_000; } @@ -2526,7 +2884,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n", info->tmr_index_assoc); ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n", - info->clk_freq); + clk_freq); ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n", info->clk_src); } @@ -2558,7 +2916,6 @@ ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) func_p->fd_fltr_best_effort); } - /** * ice_parse_func_caps - Parse function capabilities * @hw: pointer to the HW struct @@ -2619,6 +2976,23 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, ice_recalc_port_limited_caps(hw, &func_p->common_cap); } +/** + * ice_func_id_to_logical_id - map from function id to logical pf id + * @active_function_bitmap: active function bitmap + * @pf_id: function number of device + */ +static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id) +{ + u8 logical_id = 0; + u8 i; + + for (i = 0; i < pf_id; i++) + if (active_function_bitmap & BIT(i)) + logical_id++; + + return logical_id; +} + /** * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps * @hw: pointer to the HW struct @@ -2636,6 +3010,8 @@ ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, dev_p->num_funcs = hweight32(number); ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", dev_p->num_funcs); + + hw->logical_pf_id = ice_func_id_to_logical_id(number, hw->pf_id); } /** @@ -2689,7 +3065,6 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, struct ice_aqc_list_caps_elem *cap) { struct ice_ts_dev_info *info = &dev_p->ts_dev_info; - u32 logical_id = le32_to_cpu(cap->logical_id); u32 phys_id = le32_to_cpu(cap->phys_id); u32 number = le32_to_cpu(cap->number); @@ -2704,7 +3079,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); - info->ena_ports = logical_id; + info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); + info->tmr_own_map = phys_id; ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", @@ -2721,8 +3097,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, info->tmr1_owned); ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", info->tmr1_ena); - ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", - info->ena_ports); + ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", + info->ts_ll_read); ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", info->tmr_own_map); } @@ -2746,6 +3122,28 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, dev_p->num_flow_director_fltr); } +/** + * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities. 
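The ice_func_id_to_logical_id() helper added above derives a logical PF index by counting how many lower-numbered functions are active in the valid-functions bitmap. The same mapping can be written as a popcount of the bits below pf_id; a stand-alone sketch with illustrative names, assuming the GCC/Clang __builtin_popcount builtin:

#include <stdint.h>
#include <stdio.h>

/* Logical id = number of active functions with an id lower than pf_id. */
static uint8_t func_id_to_logical_id(uint32_t active_bitmap, uint8_t pf_id)
{
	uint32_t lower_mask = (1u << pf_id) - 1;

	return (uint8_t)__builtin_popcount(active_bitmap & lower_mask);
}

int main(void)
{
	/* Functions 0, 2 and 5 active: PF 5 becomes logical PF 2. */
	uint32_t bitmap = (1u << 0) | (1u << 2) | (1u << 5);

	printf("logical id of PF 5 = %u\n", func_id_to_logical_id(bitmap, 5));
	return 0;
}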
+ */ +static void +ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + dev_p->nac_topo.mode = le32_to_cpu(cap->number); + dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M; + + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n", + !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M)); + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n", + !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M)); + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n", + dev_p->nac_topo.id); +} /** * ice_parse_dev_caps - Parse device capabilities @@ -2795,6 +3193,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, case ICE_AQC_CAPS_FD: ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); break; + case ICE_AQC_CAPS_NAC_TOPOLOGY: + ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]); + break; default: /* Don't list common capabilities as unknown */ if (!found) @@ -2829,19 +3230,19 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that * firmware could return) to avoid this. */ -enum ice_status +int ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aqc_list_caps *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.get_cap; if (opc != ice_aqc_opc_list_func_caps && opc != ice_aqc_opc_list_dev_caps) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); @@ -2860,16 +3261,16 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, * Read the device capabilities and extract them into the dev_caps structure * for later use. */ -enum ice_status +int ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) { - enum ice_status status; u32 cap_count = 0; void *cbuf; + int status; cbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!cbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Although the driver doesn't know the number of capabilities the * device will return, we can simply send a 4KB buffer, the maximum @@ -2894,16 +3295,16 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) * Read the function capabilities and extract them into the func_caps structure * for later use. */ -static enum ice_status +static int ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) { - enum ice_status status; u32 cap_count = 0; void *cbuf; + int status; cbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!cbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Although the driver doesn't know the number of capabilities the * device will return, we can simply send a 4KB buffer, the maximum @@ -2993,9 +3394,9 @@ void ice_set_safe_mode_caps(struct ice_hw *hw) * ice_get_caps - get info about the HW * @hw: pointer to the hardware structure */ -enum ice_status ice_get_caps(struct ice_hw *hw) +int ice_get_caps(struct ice_hw *hw) { - enum ice_status status; + int status; status = ice_discover_dev_caps(hw, &hw->dev_caps); if (status) @@ -3013,7 +3414,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw) * * This function is used to write MAC address to the NVM (0x0108). 
*/ -enum ice_status +int ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd) { @@ -3035,7 +3436,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, * * Tell the firmware that the driver is taking over from PXE (0x0110). */ -static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) +static int ice_aq_clear_pxe_mode(struct ice_hw *hw) { struct ice_aq_desc desc; @@ -3069,7 +3470,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw) * * Set Physical port parameters (0x0203) */ -enum ice_status +int ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi, bool save_bad_pac, bool pad_short_pac, bool double_vlan, struct ice_sq_cd *cd) @@ -3095,6 +3496,26 @@ ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi, return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } +/** + * ice_is_100m_speed_supported + * @hw: pointer to the HW struct + * + * returns true if 100M speeds are supported by the device, + * false otherwise. + */ +bool ice_is_100m_speed_supported(struct ice_hw *hw) +{ + switch (hw->device_id) { + case ICE_DEV_ID_E822C_SGMII: + case ICE_DEV_ID_E822L_SGMII: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823C_SGMII: + return true; + default: + return false; + } +} + /** * ice_get_link_speed_based_on_phy_type - returns link speed * @phy_type_low: lower part of phy_type @@ -3105,8 +3526,8 @@ ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi, * Note: In the structure of [phy_type_low, phy_type_high], there should * be one bit set, as this function will convert one PHY type to its * speed. - * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned - * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned + * If no bit gets set, 0 will be returned + * If more than one bit gets set, 0 will be returned */ static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) @@ -3283,15 +3704,15 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, * mode as the PF may not have the privilege to set some of the PHY Config * parameters. This status will be indicated by the command response (0x0601). */ -enum ice_status +int ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) { struct ice_aq_desc desc; - enum ice_status status; + int status; if (!cfg) - return ICE_ERR_PARAM; + return -EINVAL; /* Ensure that only valid bits of cfg->caps can be turned on. 
*/ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { @@ -3333,13 +3754,13 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, * ice_update_link_info - update status of the HW network link * @pi: port info structure of the interested logical port */ -enum ice_status ice_update_link_info(struct ice_port_info *pi) +int ice_update_link_info(struct ice_port_info *pi) { struct ice_link_status *li; - enum ice_status status; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; li = &pi->phy.link_info; @@ -3355,7 +3776,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); @@ -3432,8 +3853,12 @@ enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) */ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) { - if (caps & ICE_AQC_PHY_EN_AUTO_FEC) - return ICE_FEC_AUTO; + if (caps & ICE_AQC_PHY_EN_AUTO_FEC) { + if (fec_options & ICE_AQC_PHY_FEC_DIS) + return ICE_FEC_DIS_AUTO; + else + return ICE_FEC_AUTO; + } if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | @@ -3455,7 +3880,7 @@ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) * @cfg: PHY configuration data to set FC mode * @req_mode: FC mode to configure */ -enum ice_status +int ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode req_mode) { @@ -3463,7 +3888,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, u8 pause_mask = 0x0; if (!pi || !cfg) - return ICE_ERR_BAD_PTR; + return -EINVAL; switch (req_mode) { case ICE_FC_FULL: pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; @@ -3501,23 +3926,23 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, * * Set the requested flow control mode. 
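The ice_caps_to_fec_mode() change near the top of this hunk adds a "disabled plus auto" case: when the auto-FEC capability bit is set, the FEC-disable option now selects a distinct mode instead of plain auto. A stand-alone sketch of that decision table with made-up bit values (the real masks live in the adminq command definitions, not here):

#include <stdio.h>

/* Hypothetical stand-ins for the PHY capability/option bits. */
#define CAP_AUTO_FEC	0x01
#define OPT_FEC_DIS	0x02
#define OPT_FEC_BASER	0x04
#define OPT_FEC_RS	0x08

enum fec_mode { FEC_NONE, FEC_AUTO, FEC_DIS_AUTO, FEC_RS, FEC_BASER };

static enum fec_mode caps_to_fec_mode(unsigned char caps, unsigned char opts)
{
	if (caps & CAP_AUTO_FEC)
		return (opts & OPT_FEC_DIS) ? FEC_DIS_AUTO : FEC_AUTO;
	if (opts & OPT_FEC_BASER)
		return FEC_BASER;
	if (opts & OPT_FEC_RS)
		return FEC_RS;
	return FEC_NONE;
}

int main(void)
{
	printf("%d\n", caps_to_fec_mode(CAP_AUTO_FEC, OPT_FEC_DIS)); /* 2 */
	printf("%d\n", caps_to_fec_mode(0, OPT_FEC_RS));             /* 3 */
	return 0;
}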
*/ -enum ice_status +int ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) { struct ice_aqc_set_phy_cfg_data cfg = { 0 }; struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi || !aq_failures) - return ICE_ERR_BAD_PTR; + return -EINVAL; *aq_failures = 0; hw = pi->hw; pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Get the current PHY config */ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, @@ -3611,7 +4036,7 @@ ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, /** * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data * @pi: port information structure - * @caps: PHY ability structure to copy date from + * @caps: PHY ability structure to copy data from * @cfg: PHY configuration structure to copy data to * * Helper function to copy AQC PHY get ability data to PHY set configuration @@ -3643,22 +4068,22 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, * @cfg: PHY configuration data to set FEC mode * @fec: FEC mode to configure */ -enum ice_status +int ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) { struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status = 0; struct ice_hw *hw; + int status = 0; if (!pi || !cfg) - return ICE_ERR_BAD_PTR; + return -EINVAL; hw = pi->hw; pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_aq_get_phy_caps(pi, false, (ice_fw_supports_report_dflt_cfg(hw) ? @@ -3693,13 +4118,19 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, /* Clear all FEC option bits. */ cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; break; + case ICE_FEC_DIS_AUTO: + /* Set No FEC and auto FEC */ + if (!ice_fw_supports_fec_dis_auto(hw)) + return -EOPNOTSUPP; + cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS; + fallthrough; case ICE_FEC_AUTO: /* AND auto FEC bit, and all caps bits. */ cfg->caps &= ICE_AQC_PHY_CAPS_MASK; cfg->link_fec_opt |= pcaps->link_fec_options; break; default: - status = ICE_ERR_PARAM; + status = -EINVAL; break; } @@ -3730,13 +4161,13 @@ out: * The variable link_up is invalid if status is non zero. As a * result of this call, link status reporting becomes enabled */ -enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) +int ice_get_link_status(struct ice_port_info *pi, bool *link_up) { struct ice_phy_info *phy_info; - enum ice_status status = 0; + int status = 0; if (!pi || !link_up) - return ICE_ERR_PARAM; + return -EINVAL; phy_info = &pi->phy; @@ -3761,7 +4192,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) * * Sets up the link and restarts the Auto-Negotiation over the link. 
*/ -enum ice_status +int ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd) { @@ -3791,7 +4222,7 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, * * Set event mask (0x0613) */ -enum ice_status +int ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd) { @@ -3816,7 +4247,7 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, * * Enable/disable loopback on a given port */ -enum ice_status +int ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) { struct ice_aqc_set_mac_lb *cmd; @@ -3831,7 +4262,6 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } - /** * ice_aq_set_port_id_led * @pi: pointer to the port information @@ -3840,7 +4270,7 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) * * Set LED value for the given port (0x06e9) */ -enum ice_status +int ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd) { @@ -3852,7 +4282,6 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); - if (is_orig_mode) cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; else @@ -3876,17 +4305,17 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, * * Read/Write SFF EEPROM (0x06EE) */ -enum ice_status +int ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd) { struct ice_aqc_sff_eeprom *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!data || (mem_addr & 0xff00)) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); cmd = &desc.params.read_write_sff_param; @@ -3916,7 +4345,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, * Program Topology Device NVM (0x06F2) * */ -enum ice_status +int ice_aq_prog_topo_dev_nvm(struct ice_hw *hw, struct ice_aqc_link_topo_params *topo_params, struct ice_sq_cd *cd) @@ -3944,7 +4373,7 @@ ice_aq_prog_topo_dev_nvm(struct ice_hw *hw, * Read Topology Device NVM (0x06F3) * */ -enum ice_status +int ice_aq_read_topo_dev_nvm(struct ice_hw *hw, struct ice_aqc_link_topo_params *topo_params, u32 start_address, u8 *data, u8 data_size, @@ -3952,17 +4381,17 @@ ice_aq_read_topo_dev_nvm(struct ice_hw *hw, { struct ice_aqc_read_topo_dev_nvm *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!data || data_size == 0 || data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.read_topo_dev_nvm; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm); - desc.datalen = data_size; + desc.datalen = cpu_to_le16(data_size); memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params)); cmd->start_address = cpu_to_le32(start_address); @@ -3983,23 +4412,23 @@ ice_aq_read_topo_dev_nvm(struct ice_hw *hw, * * Internal function to get (0x0B05) or set (0x0B03) RSS look up table */ -static enum ice_status +static int __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) { u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; struct ice_aqc_get_set_rss_lut *cmd_resp; struct ice_aq_desc desc; - enum ice_status status; + int status; u8 *lut; if (!params) - return ICE_ERR_PARAM; + return -EINVAL; 
vsi_handle = params->vsi_handle; lut = params->lut; if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) - return ICE_ERR_PARAM; + return -EINVAL; lut_size = params->lut_size; lut_type = params->lut_type; @@ -4028,7 +4457,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); break; default: - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_aq_get_set_rss_lut_exit; } @@ -4061,9 +4490,9 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; break; } - /* fall-through */ + fallthrough; default: - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_aq_get_set_rss_lut_exit; } @@ -4082,7 +4511,7 @@ ice_aq_get_set_rss_lut_exit: * * get the RSS lookup table, PF or VSI type */ -enum ice_status +int ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) { return __ice_aq_get_set_rss_lut(hw, get_params, false); @@ -4095,7 +4524,7 @@ ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_ * * set the RSS lookup table, PF or VSI type */ -enum ice_status +int ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) { return __ice_aq_get_set_rss_lut(hw, set_params, true); @@ -4110,8 +4539,7 @@ ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_ * * get (0x0B04) or set (0x0B02) the RSS key per VSI */ -static enum -ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, +static int __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, struct ice_aqc_get_set_rss_keys *key, bool set) { @@ -4144,12 +4572,12 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, * * get the RSS key per VSI */ -enum ice_status +int ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *key) { if (!ice_is_vsi_valid(hw, vsi_handle) || !key) - return ICE_ERR_PARAM; + return -EINVAL; return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), key, false); @@ -4163,12 +4591,12 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, * * set the RSS key per VSI */ -enum ice_status +int ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys) { if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) - return ICE_ERR_PARAM; + return -EINVAL; return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), keys, true); @@ -4195,7 +4623,7 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue * flow. 
*/ -static enum ice_status +static int ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, struct ice_sq_cd *cd) @@ -4210,10 +4638,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); if (!qg_list) - return ICE_ERR_PARAM; + return -EINVAL; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) - return ICE_ERR_PARAM; + return -EINVAL; for (i = 0, list = qg_list; i < num_qgrps; i++) { sum_size += struct_size(list, txqs, list->num_txqs); @@ -4222,7 +4650,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, } if (buf_size != sum_size) - return ICE_ERR_PARAM; + return -EINVAL; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); @@ -4243,7 +4671,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, * * Disable LAN Tx queue (0x0C31) */ -static enum ice_status +static int ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, enum ice_disq_rst_src rst_src, u16 vmvf_num, @@ -4252,7 +4680,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *item; struct ice_aqc_dis_txqs *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; u16 i, sz = 0; cmd = &desc.params.dis_txqs; @@ -4260,10 +4688,10 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, /* qg_list can be NULL only in VM/VF reset flow */ if (!qg_list && !rst_src) - return ICE_ERR_PARAM; + return -EINVAL; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) - return ICE_ERR_PARAM; + return -EINVAL; cmd->num_entries = num_qgrps; @@ -4312,7 +4740,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, } if (buf_size != sz) - return ICE_ERR_PARAM; + return -EINVAL; do_aq: status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); @@ -4345,7 +4773,7 @@ do_aq: * * Move / Reconfigure Tx LAN queues (0x0C32) */ -enum ice_status +int ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, bool is_tc_change, bool subseq_call, bool flush_pipe, u8 timeout, u32 *blocked_cgds, @@ -4354,20 +4782,20 @@ ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, { struct ice_aqc_move_txqs *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.move_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs); #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX) - return ICE_ERR_PARAM; + return -EINVAL; if (is_tc_change && !flush_pipe && !blocked_cgds) - return ICE_ERR_PARAM; + return -EINVAL; if (!is_move && !is_tc_change) - return ICE_ERR_PARAM; + return -EINVAL; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); @@ -4409,7 +4837,7 @@ ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, * * Add Tx RDMA Qsets (0x0C33) */ -static enum ice_status +static int ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, struct ice_aqc_add_rdma_qset_data *qset_list, u16 buf_size, struct ice_sq_cd *cd) @@ -4424,10 +4852,10 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); if (!qset_list) - return ICE_ERR_PARAM; + return -EINVAL; if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) - return ICE_ERR_PARAM; + return -EINVAL; for (i = 0, list = qset_list; i < num_qset_grps; i++) { u16 num_qsets = le16_to_cpu(list->num_qsets); @@ -4438,7 +4866,7 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, } if (buf_size != sum_size) - return ICE_ERR_PARAM; + return -EINVAL; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); @@ 
-4640,7 +5068,7 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) * @dest_ctx: pointer to memory for the packed structure * @ce_info: a description of the structure to be transformed */ -enum ice_status +int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { @@ -4670,13 +5098,62 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); break; default: - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } } return 0; } +/** + * ice_aq_get_internal_data + * @hw: pointer to the hardware structure + * @cluster_id: specific cluster to dump + * @table_id: table ID within cluster + * @start: index of line in the block to read + * @buf: dump buffer + * @buf_size: dump buffer size + * @ret_buf_size: return buffer size (returned by FW) + * @ret_next_table: next block to read (returned by FW) + * @ret_next_index: next index to read (returned by FW) + * @cd: pointer to command details structure + * + * Get internal FW/HW data (0xFF08) for debug purposes. + */ +int +ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id, + u32 start, void *buf, u16 buf_size, u16 *ret_buf_size, + u16 *ret_next_table, u32 *ret_next_index, + struct ice_sq_cd *cd) +{ + struct ice_aqc_debug_dump_internals *cmd; + struct ice_aq_desc desc; + int status; + + cmd = &desc.params.debug_dump; + + if (buf_size == 0 || !buf) + return -EINVAL; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals); + + cmd->cluster_id = cluster_id; + cmd->table_id = cpu_to_le16(table_id); + cmd->idx = cpu_to_le32(start); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + + if (!status) { + if (ret_buf_size) + *ret_buf_size = le16_to_cpu(desc.datalen); + if (ret_next_table) + *ret_next_table = le16_to_cpu(cmd->table_id); + if (ret_next_index) + *ret_next_index = le32_to_cpu(cmd->idx); + } + + return status; +} /** * ice_print_sched_elem - parse through an element struct in a branch @@ -4691,7 +5168,7 @@ ice_print_sched_elem(struct ice_hw *hw, int elem, struct ice_aqc_txsched_elem *d = &data->data; unsigned long valid_sec = d->valid_sections; char str[128]; - int i; + u16 i; dev_info(ice_hw_to_dev(hw), "\t\telement %d\n", elem); dev_info(ice_hw_to_dev(hw), "\t\t\tparent TEID %d\n", @@ -4774,23 +5251,23 @@ ice_print_sched_elem(struct ice_hw *hw, int elem, * ice_dump_port_dflt_topo - print scheduler tree topology for a port * @pi: pointer to the port_info structure */ -enum ice_status ice_dump_port_dflt_topo(struct ice_port_info *pi) +int ice_dump_port_dflt_topo(struct ice_port_info *pi) { struct ice_aqc_get_topo_elem *buf; struct ice_hw *hw = pi->hw; u16 j, buf_size, num_elem; - enum ice_status ret; u8 i, num_branches; + int ret; /* allocate memory for response buffer */ buf_size = sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES; buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ret = ice_aq_get_dflt_topo(hw, pi->lport, buf, buf_size, &num_branches, NULL); if (ret) { - ret = ICE_ERR_CFG; + ret = -EIO; goto err_exit; } @@ -4830,7 +5307,7 @@ static void ice_sched_print_tree(struct ice_hw *hw, struct ice_sched_node *node) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; - enum ice_status status; + int status; u8 i; if (!node) @@ -4929,6 +5406,8 @@ ice_dump_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, dev_info(ice_hw_to_dev(hw), "%s: mgmt_cem = %d\n", prefix, caps->mgmt_cem); 
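The ice_aq_get_internal_data() helper added above returns, besides the data chunk itself, the firmware's notion of the next table and next index to read, so a complete dump is collected by looping with that cursor. A stand-alone sketch of a cursor-driven read loop with a stubbed read function; the names and the end-of-dump sentinel are made up for illustration, since the real completion condition is not part of this hunk:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_SZ	16
#define TABLE_DONE	0xFFFF	/* hypothetical "no more data" marker */

/* Stub for the firmware read: copies up to CHUNK_SZ bytes of a fake table
 * and reports where the next read should start.
 */
static int read_chunk(uint16_t table, uint32_t index, uint8_t *buf,
		      uint16_t *got, uint16_t *next_table, uint32_t *next_index)
{
	static const uint8_t fake_table[40] = { 0xAB };
	uint32_t left = sizeof(fake_table) - index;
	uint16_t n = left > CHUNK_SZ ? CHUNK_SZ : (uint16_t)left;

	memcpy(buf, &fake_table[index], n);
	*got = n;
	if (index + n < sizeof(fake_table)) {
		*next_table = table;
		*next_index = index + n;
	} else {
		*next_table = TABLE_DONE;
		*next_index = 0;
	}
	return 0;
}

int main(void)
{
	uint16_t table = 0;
	uint32_t index = 0;
	uint8_t buf[CHUNK_SZ];
	size_t total = 0;

	/* Follow the cursor returned by each read until the end marker. */
	while (table != TABLE_DONE) {
		uint16_t got;

		if (read_chunk(table, index, buf, &got, &table, &index))
			break;
		total += got;
	}
	printf("dumped %zu bytes\n", total);
	return 0;
}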
dev_info(ice_hw_to_dev(hw), "%s: iwarp = %d\n", prefix, caps->iwarp); + dev_info(ice_hw_to_dev(hw), "%s: roce_lag = %d\n", prefix, + caps->roce_lag); dev_info(ice_hw_to_dev(hw), "%s: wr_csr_prot = 0x%llX\n", prefix, (unsigned long long)caps->wr_csr_prot); dev_info(ice_hw_to_dev(hw), "%s: num_wol_proxy_fltr = %d\n", prefix, @@ -4999,7 +5478,7 @@ void ice_dump_ptp_func_caps(struct ice_hw *hw) dev_info(ice_hw_to_dev(hw), "PTP func cap: tmr_index_owned = %d\n", ptpfunc->tmr_index_owned); dev_info(ice_hw_to_dev(hw), "PTP func cap: clk_freq = %d\n", - ptpfunc->clk_freq); + ptpfunc->time_ref); dev_info(ice_hw_to_dev(hw), "PTP func cap: clk_src = %d\n", ptpfunc->clk_src); dev_info(ice_hw_to_dev(hw), "PTP func cap: tmr_index_assoc = %d\n", @@ -5027,8 +5506,8 @@ void ice_dump_ptp_dev_caps(struct ice_hw *hw) ptpdev->tmr0_ena); dev_info(ice_hw_to_dev(hw), "PTP dev cap: tmr1_ena = %d\n", ptpdev->tmr1_ena); - dev_info(ice_hw_to_dev(hw), "PTP dev cap: ena_ports(bitmap) = %d\n", - ptpdev->ena_ports); + dev_info(ice_hw_to_dev(hw), "PTP dev cap: ts_ll_read = %d\n", + ptpdev->ts_ll_read); dev_info(ice_hw_to_dev(hw), "PTP dev cap: tmr_own_map = %d\n", ptpdev->tmr_own_map); } @@ -5052,10 +5531,6 @@ void ice_dump_port_info(struct ice_port_info *pi) dev_info(ice_hw_to_dev(pi->hw), "\tvirt_port = %d\n", pi->lport); dev_info(ice_hw_to_dev(pi->hw), "\tswid = %d\n", pi->sw_id); - dev_info(ice_hw_to_dev(pi->hw), "\tdflt_tx_vsi = %d\n", - pi->dflt_tx_vsi_num); - dev_info(ice_hw_to_dev(pi->hw), "\tdflt_rx_vsi = %d\n", - pi->dflt_rx_vsi_num); dev_info(ice_hw_to_dev(pi->hw), "\t%s_num = %d\n", (pi->is_vf ? "vf" : "pf"), pi->pf_vf_num); dev_info(ice_hw_to_dev(pi->hw), "\tlast_node_teid = %d\n", @@ -5066,8 +5541,6 @@ void ice_dump_port_info(struct ice_port_info *pi) dev_info(ice_hw_to_dev(pi->hw), "\tmac_addr: %pM\n", pi->mac.lan_addr); } - - /** * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC * @hw: pointer to the HW struct @@ -5105,7 +5578,7 @@ ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) * * This function adds one LAN queue */ -enum ice_status +int ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -5113,19 +5586,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, struct ice_aqc_txsched_elem_data node = { 0 }; struct ice_sched_node *parent; struct ice_q_ctx *q_ctx; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; if (num_qgrps > 1 || buf->num_txqs > 1) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; hw = pi->hw; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); @@ -5133,7 +5606,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, if (!q_ctx) { ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", q_handle); - status = ICE_ERR_PARAM; + status = -EINVAL; goto ena_txq_exit; } @@ -5141,7 +5614,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, ICE_SCHED_NODE_OWNER_LAN); if (!parent) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto ena_txq_exit; } @@ -5210,20 +5683,20 @@ ena_txq_exit: * * This function removes queues and their corresponding nodes in SW DB */ -enum ice_status +int ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, 
u8 tc, u8 num_queues, u16 *q_handles, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { - enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_aqc_dis_txq_item *qg_list; struct ice_q_ctx *q_ctx; + int status = -ENOENT; struct ice_hw *hw; u16 i, buf_size; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; hw = pi->hw; @@ -5235,13 +5708,13 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, if (rst_src) return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, vmvf_num, NULL); - return ICE_ERR_CFG; + return -EIO; } buf_size = struct_size(qg_list, q_id, 1); qg_list = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!qg_list) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mutex_lock(&pi->sched_lock); @@ -5288,18 +5761,18 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, * * This function adds/updates the VSI queues per TC. */ -static enum ice_status +static int ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *maxqs, u8 owner) { - enum ice_status status = 0; + int status = 0; u8 i; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); @@ -5327,7 +5800,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, * * This function adds/updates the VSI LAN queues per TC. */ -enum ice_status +int ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_lanqs) { @@ -5344,7 +5817,7 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, * * This function adds/updates the VSI RDMA queues per TC. 
*/ -enum ice_status +int ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_rdmaqs) { @@ -5363,34 +5836,34 @@ ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, * * This function adds RDMA qset */ -enum ice_status +int ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) { struct ice_aqc_txsched_elem_data node = { 0 }; struct ice_aqc_add_rdma_qset_data *buf; struct ice_sched_node *parent; - enum ice_status status; struct ice_hw *hw; u16 i, buf_size; + int status; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; hw = pi->hw; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; buf_size = struct_size(buf, rdma_qsets, num_qsets); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mutex_lock(&pi->sched_lock); parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, ICE_SCHED_NODE_OWNER_RDMA); if (!parent) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto rdma_error_exit; } buf->parent_teid = parent->info.node_teid; @@ -5439,25 +5912,25 @@ rdma_error_exit: * @qset_teid: TEID of qset node * @q_id: list of queue IDs being disabled */ -enum ice_status +int ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, u16 *q_id) { struct ice_aqc_dis_txq_item *qg_list; - enum ice_status status = 0; struct ice_hw *hw; + int status = 0; u16 qg_size; int i; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; hw = pi->hw; qg_size = struct_size(qg_list, q_id, 1); qg_list = devm_kzalloc(ice_hw_to_dev(hw), qg_size, GFP_KERNEL); if (!qg_list) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mutex_lock(&pi->sched_lock); @@ -5487,6 +5960,473 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, return status; } +/** + * ice_aq_cfg_cgu_err + * @hw: pointer to the HW struct + * @ena_event_report: enable or disable event reporting + * @ena_err_report: enable/re-enable or disable error reporting mechanism + * @cd: pointer to command details structure or NULL + * + * Configure CGU error reporting mechanism (0x0C60) + */ +int +ice_aq_cfg_cgu_err(struct ice_hw *hw, bool ena_event_report, + bool ena_err_report, struct ice_sq_cd *cd) +{ + struct ice_aqc_cfg_cgu_err *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.config_cgu_err; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_cgu_err); + + if (!ena_event_report) + cmd->cmd |= ICE_AQC_CFG_CGU_EVENT_DIS; + + if (!ena_err_report) + cmd->cmd |= ICE_AQC_CFG_CGU_ERR_DIS; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_get_cgu_abilities + * @hw: pointer to the HW struct + * @abilities: CGU abilities + * + * Get CGU abilities (0x0C61) + */ +int +ice_aq_get_cgu_abilities(struct ice_hw *hw, + struct ice_aqc_get_cgu_abilities *abilities) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities); + return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL); +} + +/** + * ice_aq_set_input_pin_cfg + * @hw: pointer to the HW struct + * @input_idx: Input index + * @flags1: Input flags + * @flags2: Input flags + * @freq: Frequency in Hz + * @phase_delay: Delay in ps + * + * Set CGU input config (0x0C62) + */ +int +ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, + u32 freq, s32 phase_delay) +{ + struct 
ice_aqc_set_cgu_input_config *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config); + cmd = &desc.params.set_cgu_input_config; + cmd->input_idx = input_idx; + cmd->flags1 = flags1; + cmd->flags2 = flags2; + cmd->freq = cpu_to_le32(freq); + cmd->phase_delay = cpu_to_le32(phase_delay); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_get_input_pin_cfg + * @hw: pointer to the HW struct + * @cfg: DPLL config + * @input_idx: Input index + * + * Get CGU input config (0x0C63) + */ +int +ice_aq_get_input_pin_cfg(struct ice_hw *hw, + struct ice_aqc_get_cgu_input_config *cfg, u8 input_idx) +{ + struct ice_aqc_get_cgu_input_config *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config); + cmd = &desc.params.get_cgu_input_config; + cmd->input_idx = input_idx; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) + *cfg = *cmd; + + return status; +} + +/** + * ice_aq_set_output_pin_cfg + * @hw: pointer to the HW struct + * @output_idx: Output index + * @flags: Output flags + * @src_sel: Index of DPLL block + * @freq: Output frequency + * @phase_delay: Output phase compensation + * + * Set CGU output config (0x0C64) + */ +int +ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, + u8 src_sel, u32 freq, s32 phase_delay) +{ + struct ice_aqc_set_cgu_output_config *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config); + cmd = &desc.params.set_cgu_output_config; + cmd->output_idx = output_idx; + cmd->flags = flags; + cmd->src_sel = src_sel; + cmd->freq = cpu_to_le32(freq); + cmd->phase_delay = cpu_to_le32(phase_delay); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_get_output_pin_cfg + * @hw: pointer to the HW struct + * @output_idx: Output index + * @flags: Output flags + * @src_sel: Internal DPLL source + * @freq: Output frequency + * @src_freq: Source frequency + * + * Get CGU output config (0x0C65) + */ +int +ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags, + u8 *src_sel, u32 *freq, u32 *src_freq) +{ + struct ice_aqc_get_cgu_output_config *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config); + cmd = &desc.params.get_cgu_output_config; + cmd->output_idx = output_idx; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) { + *flags = cmd->flags; + *src_sel = cmd->src_sel; + *freq = le32_to_cpu(cmd->freq); + *src_freq = le32_to_cpu(cmd->src_freq); + } + + return status; +} + +/** + * convert_s48_to_s64 - convert 48 bit value to 64 bit value + * @signed_48: signed 64 bit variable storing signed 48 bit value + * + * Convert signed 48 bit value to its 64 bit representation. + * + * Return: signed 64 bit representation of signed 48 bit value. + */ +static inline +s64 convert_s48_to_s64(s64 signed_48) +{ + const s64 MASK_SIGN_BITS = GENMASK_ULL(63, 48); + const s64 SIGN_BIT_47 = BIT_ULL(47); + + return ((signed_48 & SIGN_BIT_47) ? 
(s64)(MASK_SIGN_BITS | signed_48) + : signed_48); +} + +/** + * ice_aq_get_cgu_dpll_status + * @hw: pointer to the HW struct + * @dpll_num: DPLL index + * @ref_state: Reference clock state + * @dpll_state: DPLL state + * @phase_offset: Phase offset in ns + * @eec_mode: EEC_mode + * + * Get CGU DPLL status (0x0C66) + */ +int +ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, + u16 *dpll_state, s64 *phase_offset, u8 *eec_mode) +{ + struct ice_aqc_get_cgu_dpll_status *cmd; + const s64 NSEC_PER_PSEC = 1000LL; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status); + cmd = &desc.params.get_cgu_dpll_status; + cmd->dpll_num = dpll_num; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) { + *ref_state = cmd->ref_state; + *dpll_state = le16_to_cpu(cmd->dpll_state); + *phase_offset = le32_to_cpu(cmd->phase_offset_h); + *phase_offset <<= 32; + *phase_offset += le32_to_cpu(cmd->phase_offset_l); + *phase_offset = convert_s48_to_s64(*phase_offset) + / NSEC_PER_PSEC; + *eec_mode = cmd->eec_mode; + } + + return status; +} + +/** + * ice_aq_set_cgu_dpll_config + * @hw: pointer to the HW struct + * @dpll_num: DPLL index + * @ref_state: Reference clock state + * @config: DPLL config + * @eec_mode: EEC mode + * + * Set CGU DPLL config (0x0C67) + */ +int +ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, + u8 config, u8 eec_mode) +{ + struct ice_aqc_set_cgu_dpll_config *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config); + cmd = &desc.params.set_cgu_dpll_config; + cmd->dpll_num = dpll_num; + cmd->ref_state = ref_state; + cmd->config = config; + cmd->eec_mode = eec_mode; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_set_cgu_ref_prio + * @hw: pointer to the HW struct + * @dpll_num: DPLL index + * @ref_idx: Reference pin index + * @ref_priority: Reference input priority + * + * Set CGU reference priority (0x0C68) + */ +int +ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, + u8 ref_priority) +{ + struct ice_aqc_set_cgu_ref_prio *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio); + cmd = &desc.params.set_cgu_ref_prio; + cmd->dpll_num = dpll_num; + cmd->ref_idx = ref_idx; + cmd->ref_priority = ref_priority; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_get_cgu_ref_prio + * @hw: pointer to the HW struct + * @dpll_num: DPLL index + * @ref_idx: Reference pin index + * @ref_prio: Reference input priority + * + * Get CGU reference priority (0x0C69) + */ +int +ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, + u8 *ref_prio) +{ + struct ice_aqc_get_cgu_ref_prio *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio); + cmd = &desc.params.get_cgu_ref_prio; + cmd->dpll_num = dpll_num; + cmd->ref_idx = ref_idx; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) + *ref_prio = cmd->ref_priority; + + return status; +} + +/** + * ice_aq_get_cgu_info + * @hw: pointer to the HW struct + * @cgu_id: CGU ID + * @cgu_cfg_ver: CGU config version + * @cgu_fw_ver: CGU firmware version + * + * Get CGU info (0x0C6A) + */ +int +ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver, + u32 *cgu_fw_ver) +{ + struct ice_aqc_get_cgu_info *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, 
ice_aqc_opc_get_cgu_info); + cmd = &desc.params.get_cgu_info; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) { + *cgu_id = le32_to_cpu(cmd->cgu_id); + *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver); + *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver); + } + + return status; +} + +/** + * ice_aq_read_cgu_reg + * @hw: pointer to the HW struct + * @offset: offset of the CGU register + * @data_len: data size to read + * @data: pointer to data (0 to 16 bytes) to be read from the CGU register + * + * Read CGU register (0x0C6E) + */ +int +ice_aq_read_cgu_reg(struct ice_hw *hw, u16 offset, u8 data_len, u8 *data) +{ + struct ice_aqc_read_cgu_reg *cmd; + struct ice_aq_desc desc; + int status; + + if (data_len > ICE_AQC_READ_CGU_REG_MAX_DATA_LEN || + (data_len > 0 && !data)) + return -EINVAL; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_cgu_reg); + cmd = &desc.params.read_cgu_reg; + cmd->offset = cpu_to_le16(offset); + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) { + struct ice_aqc_read_cgu_reg_resp *resp; + u8 i; + + resp = &desc.params.read_cgu_reg_resp; + for (i = 0; i < data_len; i++) { + *data = resp->data[i]; + data++; + } + } + + return status; +} + +/** + * ice_aq_write_cgu_reg + * @hw: pointer to the HW struct + * @offset: offset of the CGU register + * @data_len: data size to write + * @data: pointer to data (0 to 7 bytes) to be written to the CGU register + * + * Write CGU register (0x0C6F) + */ +int +ice_aq_write_cgu_reg(struct ice_hw *hw, u16 offset, u8 data_len, u8 *data) +{ + struct ice_aqc_write_cgu_reg *cmd; + struct ice_aq_desc desc; + u8 i; + + if (data_len > ICE_AQC_WRITE_CGU_REG_MAX_DATA_LEN || + (data_len > 0 && !data)) + return -EINVAL; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_cgu_reg); + cmd = &desc.params.write_cgu_reg; + cmd->offset = cpu_to_le16(offset); + cmd->data_len = data_len; + for (i = 0; i < data_len; i++) { + cmd->data[i] = *data; + data++; + } + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_set_phy_rec_clk_out - set RCLK phy out + * @hw: pointer to the HW struct + * @phy_output: PHY reference clock output pin + * @enable: GPIO state to be applied + * @freq: PHY output frequency + * + * Set PHY recovered clock output (0x0630) + * Return 0 on success or negative value on failure.
+ */ +int +ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, + u32 *freq) +{ + struct ice_aqc_set_phy_rec_clk_out *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out); + cmd = &desc.params.set_phy_rec_clk_out; + cmd->phy_output = phy_output; + cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT; + cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN; + cmd->freq = cpu_to_le32(*freq); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) + *freq = le32_to_cpu(cmd->freq); + + return status; +} + +/** + * ice_aq_get_phy_rec_clk_out + * @hw: pointer to the HW struct + * @phy_output: PHY reference clock output pin + * @port_num: Port number + * @flags: PHY flags + * @freq: PHY output frequency + * + * Get PHY recovered clock output (0x0631) + */ +int +ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, u8 *port_num, + u8 *flags, u32 *freq) +{ + struct ice_aqc_get_phy_rec_clk_out *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out); + cmd = &desc.params.get_phy_rec_clk_out; + cmd->phy_output = phy_output; + cmd->port_num = *port_num; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) { + *port_num = cmd->port_num; + *flags = cmd->flags; + *freq = le32_to_cpu(cmd->freq); + } + + return status; +} /** * ice_is_main_vsi - checks whether the VSI is main VSI @@ -5501,7 +6441,6 @@ static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle) return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle]; } - /** * ice_replay_pre_init - replay pre initialization * @hw: pointer to the HW struct @@ -5509,10 +6448,10 @@ static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle) * * Initializes required config data for VSI, FD, ACL, and RSS before replay. */ -static enum ice_status +int ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw) { - enum ice_status status; + int status; u8 i; /* Delete old entries from replay filter list head if there is any */ @@ -5541,14 +6480,14 @@ ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw) * Restore all VSI configuration after reset. It is required to call this * function with main VSI first. 
*/ -enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) { struct ice_switch_info *sw = hw->switch_info; struct ice_port_info *pi = hw->port_info; - enum ice_status status; + int status; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; /* Replay pre-initialization if there is any */ if (ice_is_main_vsi(hw, vsi_handle)) { @@ -5656,8 +6595,6 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, *prev_stat = new_data; } - - /** * ice_sched_query_elem - query element information from HW * @hw: pointer to the HW struct @@ -5666,12 +6603,12 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, * * This function queries HW element information */ -enum ice_status +int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf) { u16 buf_size, num_elem_ret = 0; - enum ice_status status; + int status; buf_size = sizeof(*buf); memset(buf, 0, buf_size); @@ -5683,7 +6620,6 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, return status; } - /** * ice_get_fw_mode - returns FW mode * @hw: pointer to the HW struct @@ -5697,7 +6633,6 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw) /* check the current FW mode */ fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M; - if (fw_mode & ICE_FW_MODE_DBG_M) return ICE_FW_MODE_DBG; else if (fw_mode & ICE_FW_MODE_REC_M) @@ -5708,7 +6643,6 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw) return ICE_FW_MODE_NORMAL; } - /** * ice_aq_read_i2c * @hw: pointer to the hw struct @@ -5722,21 +6656,21 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw) * * Read I2C (0x06E2) */ -enum ice_status +int ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, u8 *data, struct ice_sq_cd *cd) { struct ice_aq_desc desc = { 0 }; struct ice_aqc_i2c *cmd; - enum ice_status status; u8 data_size; + int status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c); cmd = &desc.params.read_write_i2c; if (!data) - return ICE_ERR_PARAM; + return -EINVAL; data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S; @@ -5772,7 +6706,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, * * Write I2C (0x06E3) */ -enum ice_status +int ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, u8 *data, struct ice_sq_cd *cd) @@ -5788,7 +6722,7 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, /* data_size limited to 4 */ if (data_size > 4) - return ICE_ERR_PARAM; + return -EINVAL; cmd->i2c_bus_addr = cpu_to_le16(bus_addr); cmd->topo_addr = topo_addr; @@ -5818,7 +6752,7 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, * a single PF will write the parameter value, while all other PFs will only * read it. 
*/ -enum ice_status +int ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, u32 value, struct ice_sq_cd *cd) { @@ -5826,14 +6760,14 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, struct ice_aq_desc desc; if (idx >= ICE_AQC_DRIVER_PARAM_MAX) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; cmd = &desc.params.drv_shared_params; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET; - cmd->param_indx = idx; + cmd->param_indx = (u8)idx; cmd->param_val = cpu_to_le32(value); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); @@ -5851,23 +6785,23 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, * Note that firmware provides no synchronization or locking. It is expected * that only a single PF will write a given parameter. */ -enum ice_status +int ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, u32 *value, struct ice_sq_cd *cd) { struct ice_aqc_driver_shared_params *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (idx >= ICE_AQC_DRIVER_PARAM_MAX) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; cmd = &desc.params.drv_shared_params; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET; - cmd->param_indx = idx; + cmd->param_indx = (u8)idx; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) @@ -5888,7 +6822,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, * * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology */ -enum ice_status +int ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, struct ice_sq_cd *cd) { @@ -5897,7 +6831,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio); cmd = &desc.params.read_write_gpio; - cmd->gpio_ctrl_handle = gpio_ctrl_handle; + cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); cmd->gpio_num = pin_idx; cmd->gpio_val = value ? 
1 : 0; @@ -5915,17 +6849,17 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of * the topology */ -enum ice_status +int ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool *value, struct ice_sq_cd *cd) { struct ice_aqc_gpio *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); cmd = &desc.params.read_write_gpio; - cmd->gpio_ctrl_handle = gpio_ctrl_handle; + cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); cmd->gpio_num = pin_idx; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); @@ -5936,6 +6870,58 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, return 0; } +/** + * ice_is_fw_api_min_ver + * @hw: pointer to the hardware structure + * @maj: major version + * @min: minor version + * @patch: patch version + * + * Checks if the firmware is minimum version + */ +static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch) +{ + if (hw->api_maj_ver == maj) { + if (hw->api_min_ver > min) + return true; + if (hw->api_min_ver == min && hw->api_patch >= patch) + return true; + } else if (hw->api_maj_ver > maj) { + return true; + } + + return false; +} + +/** + * ice_is_fw_min_ver + * @hw: pointer to the hardware structure + * @branch: branch version + * @maj: major version + * @min: minor version + * @patch: patch version + * + * Checks if the firmware is minimum version + */ +static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min, + u8 patch) +{ + if (hw->fw_branch == branch) { + if (hw->fw_maj_ver > maj) + return true; + if (hw->fw_maj_ver == maj) { + if (hw->fw_min_ver > min) + return true; + if (hw->fw_min_ver == min && hw->fw_patch >= patch) + return true; + } + } else if (hw->fw_branch > branch) { + return true; + } + + return false; +} + /** * ice_fw_supports_link_override * @hw: pointer to the hardware structure @@ -5944,17 +6930,9 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, */ bool ice_fw_supports_link_override(struct ice_hw *hw) { - if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) { - if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN) - return true; - if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN && - hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH) - return true; - } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) { - return true; - } - - return false; + return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ, + ICE_FW_API_LINK_OVERRIDE_MIN, + ICE_FW_API_LINK_OVERRIDE_PATCH); } /** @@ -5964,13 +6942,13 @@ bool ice_fw_supports_link_override(struct ice_hw *hw) * * Gets the link default override for a port */ -enum ice_status +int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi) { u16 i, tlv, tlv_len, tlv_start, buf, offset; struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); @@ -6077,7 +7055,7 @@ bool ice_is_fw_health_report_supported(struct ice_hw *hw) * Configure the health status event types that the firmware will send to this * PF. 
The supported event types are: PF-specific, all PFs, and global */ -enum ice_status +int ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source, struct ice_sq_cd *cd) { @@ -6094,7 +7072,6 @@ ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source, return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } - /** * ice_aq_get_port_options * @hw: pointer to the hw struct @@ -6109,7 +7086,7 @@ ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source, * * Calls Get Port Options AQC (0x06ea) and verifies result. */ -enum ice_status +int ice_aq_get_port_options(struct ice_hw *hw, struct ice_aqc_get_port_options_elem *options, u8 *option_count, u8 lport, bool lport_valid, @@ -6117,14 +7094,14 @@ ice_aq_get_port_options(struct ice_hw *hw, { struct ice_aqc_get_port_options *cmd; struct ice_aq_desc desc; - enum ice_status status; u8 pmd_count; u8 max_speed; + int status; u8 i; /* options buffer shall be able to hold max returned options */ if (*option_count < ICE_AQC_PORT_OPT_COUNT_M) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.get_port_options; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options); @@ -6146,7 +7123,7 @@ ice_aq_get_port_options(struct ice_hw *hw, *active_option_idx = cmd->port_options & ICE_AQC_PORT_OPT_ACTIVE_M; if (*active_option_idx > (*option_count - 1)) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n", *active_option_idx); } @@ -6166,13 +7143,13 @@ ice_aq_get_port_options(struct ice_hw *hw, */ if (max_speed <= ICE_AQC_PORT_OPT_MAX_LANE_100G) { if (pmd_count > ICE_MAX_PORT_PER_PCI_DEV) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; if (pmd_count > 2 && max_speed > ICE_AQC_PORT_OPT_MAX_LANE_25G) - return ICE_ERR_CFG; + return -EIO; if (pmd_count > 7 && max_speed > ICE_AQC_PORT_OPT_MAX_LANE_10G) - return ICE_ERR_CFG; + return -EIO; } } @@ -6189,7 +7166,7 @@ ice_aq_get_port_options(struct ice_hw *hw, * * Set the LLDP MIB. 
(0x0A08) */ -enum ice_status +int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd) { @@ -6199,7 +7176,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, cmd = &desc.params.lldp_set_mib; if (buf_size == 0 || !buf) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); @@ -6218,19 +7195,12 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, */ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) { - if (hw->mac_type != ICE_MAC_E810) + if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC) return false; - if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) { - if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN) - return true; - if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN && - hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH) - return true; - } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) { - return true; - } - return false; + return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ, + ICE_FW_API_LLDP_FLTR_MIN, + ICE_FW_API_LLDP_FLTR_PATCH); } /** @@ -6239,7 +7209,7 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) * @vsi_num: absolute HW index for VSI * @add: boolean for if adding or removing a filter */ -enum ice_status +int ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) { struct ice_aqc_lldp_filter_ctrl *cmd; @@ -6259,6 +7229,19 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } +/** + * ice_lldp_execute_pending_mib - execute LLDP pending MIB request + * @hw: pointer to HW struct + */ +int ice_lldp_execute_pending_mib(struct ice_hw *hw) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + /** * ice_fw_supports_report_dflt_cfg * @hw: pointer to the hardware structure @@ -6267,202 +7250,35 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) */ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) { - if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) { - if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN) - return true; - if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN && - hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH) - return true; - } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) { + return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ, + ICE_FW_API_REPORT_DFLT_CFG_MIN, + ICE_FW_API_REPORT_DFLT_CFG_PATCH); +} + +/** + * ice_fw_supports_fec_dis_auto + * @hw: pointer to the hardware structure + * + * Checks if the firmware supports FEC disable in Auto FEC mode + */ +bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw) +{ + return ice_is_fw_min_ver(hw, ICE_FW_FEC_DIS_AUTO_BRANCH, + ICE_FW_FEC_DIS_AUTO_MAJ, + ICE_FW_FEC_DIS_AUTO_MIN, + ICE_FW_FEC_DIS_AUTO_PATCH); +} +/** + * ice_is_fw_auto_drop_supported + * @hw: pointer to the hardware structure + * + * Checks if the firmware supports auto drop feature + */ +bool ice_is_fw_auto_drop_supported(struct ice_hw *hw) +{ + if (hw->api_maj_ver >= ICE_FW_API_AUTO_DROP_MAJ && + hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN) return true; - } return false; } -/** - * ice_is_pca9575_sw_handle - * @hw: pointer to the hw struct - * @handle: GPIO controller's handle - * - * This command will check if the reset pin is present in the netlist for - * a given netlist handle. 
The SW controlled IO expander does not have this pin - * populated in the netlist. - */ -static bool -ice_is_pca9575_sw_handle(struct ice_hw *hw, u16 handle) -{ - struct ice_aqc_get_link_topo_pin *cmd; - struct ice_aq_desc desc; - - cmd = &desc.params.get_link_topo_pin; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo_pin); - - /* set node comtext to the given GPIO controller */ - cmd->addr.topo_params.node_type_ctx = - (ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED << - ICE_AQC_LINK_TOPO_NODE_CTX_S); - cmd->addr.handle = handle; - - /* Try finding the reset pin in the GPIO context */ - cmd->input_io_params = (ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_GPIO << - ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S) | - ICE_AQC_LINK_TOPO_IO_FUNC_RESET_N; - - /* If the expander is controlled by software the following command - * should return error ICE_AQ_RC_ENXIO - */ - if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL) && - hw->adminq.sq_last_status == ICE_AQ_RC_ENXIO) - return true; - - return false; -} - -/** - * ice_get_pca9575_handle - * @hw: pointer to the hw struct - * @pca9575_handle: GPIO controller's handle - * - * Find and return the GPIO controller's handle in the netlist. - * When found - the value will be cached in the hw structure and following calls - * will return cached value - */ -static enum ice_status -ice_get_pca9575_handle(struct ice_hw *hw, __le16 *pca9575_handle) -{ - struct ice_aqc_get_link_topo *cmd; - struct ice_aq_desc desc; - enum ice_status status; - __le16 handle; - u8 idx; - - if (!hw || !pca9575_handle) - return ICE_ERR_PARAM; - - /* If handle was read previously return cached value */ - if (hw->io_expander_handle) { - *pca9575_handle = hw->io_expander_handle; - return 0; - } - - /* If handle was not detected read it from the netlist */ - cmd = &desc.params.get_link_topo; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); - - /* Set node type to GPIO controller */ - cmd->addr.topo_params.node_type_ctx = - (ICE_AQC_LINK_TOPO_NODE_TYPE_M & - ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL); - -#define SW_PCA9575_MAX_TOPO_IDX 2 - - /* SW IO expander is usually the last one in the netlist. Scan the - * netlist backward and see if we find it. Index 0 is assigned to - * the IO widget so we skip it. 
- */ - for (idx = SW_PCA9575_MAX_TOPO_IDX; idx > 0; idx--) { - cmd->addr.topo_params.index = idx; - - status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); - if (status) - continue; - - handle = desc.params.get_link_topo.addr.handle; - - /* Verify if we found the right IO expander type */ - if (desc.params.get_link_topo.node_part_num == - ICE_ACQ_GET_LINK_TOPO_NODE_NR_PCA9575 && - ice_is_pca9575_sw_handle(hw, handle)) - break; - } - - /* Expander not found */ - if (!cmd->addr.topo_params.index) - return ICE_ERR_NOT_SUPPORTED; - - /* If present save the handle and return it */ - hw->io_expander_handle = desc.params.get_link_topo.addr.handle; - *pca9575_handle = hw->io_expander_handle; - - return 0; -} - -/** - * ice_read_e810t_pca9575_reg - * @hw: pointer to the hw struct - * @offset: GPIO controller register offset - * @data: pointer to data to be read from the GPIO controller - * - * Read the register from the GPIO controller - */ -enum ice_status -ice_read_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data) -{ - struct ice_aqc_link_topo_addr link_topo; - enum ice_status status; - __le16 addr; - - memset(&link_topo, 0, sizeof(link_topo)); - - status = ice_get_pca9575_handle(hw, &link_topo.handle); - if (status) - return status; - - link_topo.topo_params.node_type_ctx = - (ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED << - ICE_AQC_LINK_TOPO_NODE_CTX_S); - - addr = cpu_to_le16((u16)offset); - - return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); -} - -/** - * ice_write_e810t_pca9575_reg - * @hw: pointer to the hw struct - * @offset: GPIO controller register offset - * @data: data to be written to the GPIO controller - * - * Write the data to the GPIO controller register - */ -enum ice_status -ice_write_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 data) -{ - struct ice_aqc_link_topo_addr link_topo; - enum ice_status status; - __le16 addr; - - memset(&link_topo, 0, sizeof(link_topo)); - - status = ice_get_pca9575_handle(hw, &link_topo.handle); - if (status) - return status; - - link_topo.topo_params.node_type_ctx = - (ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED << - ICE_AQC_LINK_TOPO_NODE_CTX_S); - - addr = cpu_to_le16((u16)offset); - - return ice_aq_write_i2c(hw, link_topo, 0, addr, 1, &data, NULL); -} - -/** - * ice_e810t_is_pca9575_present - * @hw: pointer to the hw struct - * - * Check if the SW IO expander is present on the board - */ -bool ice_e810t_is_pca9575_present(struct ice_hw *hw) -{ - enum ice_status status; - u8 data; - - status = ice_read_e810t_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data); - - if (status) - return false; - - return true; -} diff --git a/drivers/thirdparty/ice/ice_common.h b/drivers/thirdparty/ice/ice_common.h index e328501a2330..7802403639f2 100644 --- a/drivers/thirdparty/ice/ice_common.h +++ b/drivers/thirdparty/ice/ice_common.h @@ -4,10 +4,10 @@ #ifndef _ICE_COMMON_H_ #define _ICE_COMMON_H_ -#include "ice.h" #include "ice_type.h" #include "ice_nvm.h" #include "ice_flex_pipe.h" +#include "ice_parser.h" #include "virtchnl.h" #include "ice_switch.h" #include "ice_fdir.h" @@ -22,81 +22,84 @@ enum ice_fw_modes { ICE_FW_MODE_ROLLBACK }; - +int ice_init_fltr_mgmt_struct(struct ice_hw *hw); +void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw); void ice_set_umac_shared(struct ice_hw *hw); -enum ice_status ice_init_hw(struct ice_hw *hw); +int ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); -enum ice_status ice_check_reset(struct ice_hw *hw); -enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); +int ice_check_reset(struct 
ice_hw *hw); +int ice_reset(struct ice_hw *hw, enum ice_reset_req req); -enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); -enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); -void ice_shutdown_all_ctrlq(struct ice_hw *hw); +int ice_create_all_ctrlq(struct ice_hw *hw); +int ice_init_all_ctrlq(struct ice_hw *hw); +void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading); void ice_destroy_all_ctrlq(struct ice_hw *hw); -enum ice_status +int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending); -enum ice_status +int ice_get_link_status(struct ice_port_info *pi, bool *link_up); -enum ice_status ice_update_link_info(struct ice_port_info *pi); -enum ice_status +int ice_update_link_info(struct ice_port_info *pi); +int ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); -enum ice_status +int ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res); -enum ice_status +int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res); -enum ice_status +int ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc, struct ice_sq_cd *cd); -enum ice_status +int ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); -enum ice_status +int ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); void ice_clear_pxe_mode(struct ice_hw *hw); -enum ice_status ice_get_caps(struct ice_hw *hw); +int ice_get_caps(struct ice_hw *hw); void ice_set_safe_mode_caps(struct ice_hw *hw); +int +ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id, + u32 start, void *buf, u16 buf_size, u16 *ret_buf_size, + u16 *ret_next_table, u32 *ret_next_index, + struct ice_sq_cd *cd); - - - -enum ice_status +int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index); -enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index); -enum ice_status +int ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index); +int ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index); -enum ice_status +int ice_write_tx_cmpltnq_ctx(struct ice_hw *hw, struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx, u32 tx_cmpltnq_index); -enum ice_status +int ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index); -enum ice_status +int ice_write_tx_drbell_q_ctx(struct ice_hw *hw, struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx, u32 tx_drbell_q_index); -enum ice_status +int ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params); -enum ice_status +int ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params); -enum ice_status +int ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); -enum ice_status +int ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); -enum ice_status +int ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, bool is_tc_change, bool subseq_call, bool flush_pipe, u8 timeout, u32 *blocked_cgds, @@ -104,59 +107,70 @@ ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, u8 *txqs_moved, struct ice_sq_cd *cd); bool ice_check_sq_alive(struct ice_hw *hw, struct 
ice_ctl_q_info *cq); -enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); +int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); extern const struct ice_ctx_ele ice_tlan_ctx_info[]; -enum ice_status +int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info); extern struct mutex ice_global_cfg_lock_sw; -enum ice_status +int ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); -enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); +int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi, bool save_bad_pac, bool pad_short_pac, bool double_vlan, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); -enum ice_status +int +ice_aq_get_netlist_node_pin(struct ice_hw *hw, + struct ice_aqc_get_link_topo_pin *cmd, + u16 *node_handle); +int +ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, + u8 *node_part_number, u16 *node_handle); +int +ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number, + u16 *node_handle); +int ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd); -enum ice_status +int ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps); void ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, u16 link_speeds_bitmap); -enum ice_status +int ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); -enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); -enum ice_status +int ice_clear_pf_cfg(struct ice_hw *hw); +int ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); bool ice_fw_supports_link_override(struct ice_hw *hw); -enum ice_status +bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw); +int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); -enum ice_status +int ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update); -enum ice_status +int ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode req_mode); bool @@ -166,85 +180,130 @@ void ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); -enum ice_status +int ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec); -enum ice_status +int ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); -enum ice_status -ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd); -enum ice_status +int +ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop, + struct ice_sq_cd *cd); +int ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct 
ice_link_status *link, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd); - -enum ice_status +int ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_prog_topo_dev_nvm(struct ice_hw *hw, struct ice_aqc_link_topo_params *topo_params, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_read_topo_dev_nvm(struct ice_hw *hw, struct ice_aqc_link_topo_params *topo_params, u32 start_address, u8 *buf, u8 buf_size, struct ice_sq_cd *cd); - void ice_dump_port_info(struct ice_port_info *pi); void ice_dump_caps(struct ice_hw *hw); void ice_dump_ptp_dev_caps(struct ice_hw *hw); void ice_dump_ptp_func_caps(struct ice_hw *hw); -enum ice_status ice_dump_port_dflt_topo(struct ice_port_info *pi); +int ice_dump_port_dflt_topo(struct ice_port_info *pi); void ice_dump_port_topo(struct ice_port_info *pi); -enum ice_status +int ice_aq_get_port_options(struct ice_hw *hw, struct ice_aqc_get_port_options_elem *options, u8 *option_count, u8 lport, bool lport_valid, u8 *active_option_idx, bool *active_option_valid); -enum ice_status +int ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_rdmaqs); -enum ice_status +int ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 *rdma_qset, u16 num_qsets, u32 *qset_teid); -enum ice_status +int ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, u16 *q_id); -enum ice_status +int ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handle, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd); -enum ice_status +int ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_lanqs); -enum ice_status +int ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd); -enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); +int +ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw); +int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); -enum ice_status ice_sbq_rw_reg_lp(struct ice_hw *hw, +int ice_sbq_rw_reg_lp(struct ice_hw *hw, struct ice_sbq_msg_input *in, bool lock); void ice_sbq_lock(struct ice_hw *hw); void ice_sbq_unlock(struct ice_hw *hw); -enum ice_status ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in); +int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in); +int +ice_aq_cfg_cgu_err(struct ice_hw *hw, bool ena_event_report, bool ena_err_report, + struct ice_sq_cd *cd); +int +ice_aq_get_cgu_abilities(struct ice_hw *hw, + struct ice_aqc_get_cgu_abilities *abilities); +int +ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, + u32 freq, s32 phase_delay); +int +ice_aq_get_input_pin_cfg(struct ice_hw *hw, + struct ice_aqc_get_cgu_input_config *cfg, + u8 input_idx); +int +ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, + u8 src_sel, u32 freq, s32 
phase_delay); +int +ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags, + u8 *src_sel, u32 *freq, u32 *src_freq); +int +ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, + u16 *dpll_state, s64 *phase_offset, u8 *eec_mode); +int +ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, + u8 config, u8 eec_mode); +int +ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, + u8 ref_priority); +int +ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, + u8 *ref_prio); +int +ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver, + u32 *cgu_fw_ver); +int +ice_aq_read_cgu_reg(struct ice_hw *hw, u16 offset, u8 data_len, u8 *data); +int +ice_aq_write_cgu_reg(struct ice_hw *hw, u16 offset, u8 data_len, u8 *data); +int +ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, + u32 *freq); +int +ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, u8 *port_num, + u8 *flags, u32 *freq); void ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); @@ -255,60 +314,44 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw); void ice_print_rollback_msg(struct ice_hw *hw); bool ice_is_generic_mac(struct ice_hw *hw); bool ice_is_e810(struct ice_hw *hw); -enum ice_status +bool ice_is_e810t(struct ice_hw *hw); +bool ice_is_e823(struct ice_hw *hw); +int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); -enum ice_status +int ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, u32 value, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, u32 *value, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool *value, struct ice_sq_cd *cd); -enum ice_status +bool ice_is_100m_speed_supported(struct ice_hw *hw); +int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd); bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); -enum ice_status +int ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); -enum ice_status +int ice_lldp_execute_pending_mib(struct ice_hw *hw); +int ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, u8 *data, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, u8 *data, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source, struct ice_sq_cd *cd); bool ice_is_fw_health_report_supported(struct ice_hw *hw); bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); - -/* E810T PCA9575 IO controller registers */ -#define ICE_PCA9575_P0_IN 0x0 -#define ICE_PCA9575_P1_IN 0x1 -#define ICE_PCA9575_P0_CFG 0x8 -#define ICE_PCA9575_P1_CFG 0x9 -#define ICE_PCA9575_P0_OUT 0xA -#define ICE_PCA9575_P1_OUT 0xB - -/* E810T PCA9575 IO controller pin control */ -#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4) -#define ICE_E810T_P1_SMA1_DIR_EN BIT(4) -#define ICE_E810T_P1_SMA1_TX_EN BIT(5) -#define ICE_E810T_P1_SMA2_UFL2_RX_DIS BIT(3) -#define ICE_E810T_P1_SMA2_DIR_EN BIT(6) -#define ICE_E810T_P1_SMA2_TX_EN BIT(7) - -enum ice_status 
-ice_read_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data); -enum ice_status -ice_write_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 data); -bool ice_e810t_is_pca9575_present(struct ice_hw *hw); +/* AQ API version for FW auto drop reports */ +bool ice_is_fw_auto_drop_supported(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/thirdparty/ice/ice_controlq.c b/drivers/thirdparty/ice/ice_controlq.c index 345ca841b429..262565cfcde7 100644 --- a/drivers/thirdparty/ice/ice_controlq.c +++ b/drivers/thirdparty/ice/ice_controlq.c @@ -3,7 +3,6 @@ #include "ice_common.h" - #define ICE_CQ_INIT_REGS(qinfo, prefix) \ do { \ (qinfo)->sq.head = prefix##_ATQH; \ @@ -26,7 +25,6 @@ do { \ (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \ } while (0) - /** * ice_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure @@ -40,7 +38,6 @@ static void ice_adminq_init_regs(struct ice_hw *hw) ICE_CQ_INIT_REGS(cq, PF_FW); } - /** * ice_mailbox_init_regs - Initialize Mailbox registers * @hw: pointer to the hardware structure @@ -90,7 +87,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) { size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc); @@ -99,7 +96,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) &cq->sq.desc_buf.pa, GFP_KERNEL | __GFP_ZERO); if (!cq->sq.desc_buf.va) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->sq.desc_buf.size = size; cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, @@ -110,7 +107,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->sq.desc_buf.va = NULL; cq->sq.desc_buf.pa = 0; cq->sq.desc_buf.size = 0; - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } return 0; @@ -121,7 +118,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) { size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc); @@ -130,7 +127,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) &cq->rq.desc_buf.pa, GFP_KERNEL | __GFP_ZERO); if (!cq->rq.desc_buf.va) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->rq.desc_buf.size = size; return 0; } @@ -157,7 +154,7 @@ static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { int i; @@ -168,7 +165,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries, sizeof(cq->rq.desc_buf), GFP_KERNEL); if (!cq->rq.dma_head) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head; /* allocate the mapped buffers */ @@ -221,7 +218,7 @@ unwind_alloc_rq_bufs: devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); cq->rq.dma_head = NULL; - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -229,7 +226,7 @@ unwind_alloc_rq_bufs: * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_sq_bufs(struct 
ice_hw *hw, struct ice_ctl_q_info *cq) { int i; @@ -238,7 +235,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, sizeof(cq->sq.desc_buf), GFP_KERNEL); if (!cq->sq.dma_head) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; /* allocate the mapped buffers */ @@ -269,10 +266,10 @@ unwind_alloc_sq_bufs: devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); cq->sq.dma_head = NULL; - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } -static enum ice_status +static int ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) { /* Clear Head and Tail */ @@ -286,7 +283,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) /* Check one register to verify that config was applied */ if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa)) - return ICE_ERR_AQ_ERROR; + return -EIO; return 0; } @@ -298,7 +295,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) * * Configure base address and length registers for the transmit queue */ -static enum ice_status +static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries); @@ -311,10 +308,10 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) * * Configure base address and length registers for the receive (event queue) */ -static enum ice_status +static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status status; + int status; status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries); if (status) @@ -364,19 +361,19 @@ do { \ * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe */ -static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code; + int ret_code; if (cq->sq.count > 0) { /* queue already initialized */ - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto init_ctrlq_exit; } /* verify input for valid configuration */ if (!cq->num_sq_entries || !cq->sq_buf_size) { - ret_code = ICE_ERR_CFG; + ret_code = -EIO; goto init_ctrlq_exit; } @@ -424,19 +421,19 @@ init_ctrlq_exit: * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe */ -static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code; + int ret_code; if (cq->rq.count > 0) { /* queue already initialized */ - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto init_ctrlq_exit; } /* verify input for valid configuration */ if (!cq->num_rq_entries || !cq->rq_buf_size) { - ret_code = ICE_ERR_CFG; + ret_code = -EIO; goto init_ctrlq_exit; } @@ -477,15 +474,15 @@ init_ctrlq_exit: * * The main shutdown routine for the Control Transmit Queue */ -static enum ice_status +static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code = 0; + int ret_code = 0; mutex_lock(&cq->sq_lock); if (!cq->sq.count) { - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto shutdown_sq_out; } @@ -525,14 +522,20 @@ static bool ice_aq_ver_check(struct ice_hw *hw) } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) 
dev_info(ice_hw_to_dev(hw), - "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n", + hw->api_maj_ver, hw->api_min_ver, + EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR); else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) dev_info(ice_hw_to_dev(hw), - "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n", + hw->api_maj_ver, hw->api_min_ver, + EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR); } else { /* Major API version is older than expected, log a warning */ dev_info(ice_hw_to_dev(hw), - "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n", + hw->api_maj_ver, hw->api_min_ver, + EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR); } return true; } @@ -544,15 +547,15 @@ static bool ice_aq_ver_check(struct ice_hw *hw) * * The main shutdown routine for the Control Receive Queue */ -static enum ice_status +static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code = 0; + int ret_code = 0; mutex_lock(&cq->rq_lock); if (!cq->rq.count) { - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto shutdown_rq_out; } @@ -575,23 +578,21 @@ shutdown_rq_out: return ret_code; } - /** * ice_init_check_adminq - Check version for Admin Queue to know if its alive * @hw: pointer to the hardware structure */ -static enum ice_status ice_init_check_adminq(struct ice_hw *hw) +static int ice_init_check_adminq(struct ice_hw *hw) { struct ice_ctl_q_info *cq = &hw->adminq; - enum ice_status status; + int status; status = ice_aq_get_fw_ver(hw, NULL); if (status) goto init_ctrlq_free_rq; - if (!ice_aq_ver_check(hw)) { - status = ICE_ERR_FW_API_VER; + status = -EIO; goto init_ctrlq_free_rq; } @@ -617,10 +618,10 @@ init_ctrlq_free_rq: * * NOTE: this function does not initialize the controlq locks */ -static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { struct ice_ctl_q_info *cq; - enum ice_status ret_code; + int ret_code; switch (q_type) { case ICE_CTL_Q_ADMIN: @@ -636,14 +637,14 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) cq = &hw->mailboxq; break; default: - return ICE_ERR_PARAM; + return -EINVAL; } cq->qtype = q_type; /* verify input for valid configuration */ if (!cq->num_rq_entries || !cq->num_sq_entries || !cq->rq_buf_size || !cq->sq_buf_size) { - return ICE_ERR_CFG; + return -EIO; } /* setup SQ command write back timeout */ @@ -683,10 +684,12 @@ static bool ice_is_sbq_supported(struct ice_hw *hw) * ice_shutdown_ctrlq - shutdown routine for any control queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type + * @unloading: is the driver unloading itself * * NOTE: this function does not destroy the control queue locks. 
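The conversion running through ice_controlq.c above swaps the driver-private enum ice_status return type for plain int carrying standard errno values (ICE_ERR_NO_MEMORY becomes -ENOMEM, ICE_ERR_PARAM becomes -EINVAL, ICE_ERR_NOT_READY becomes -EBUSY, and the various AQ failures collapse to -EIO), so callers no longer need a separate translation step. A minimal before/after sketch of the pattern, using a hypothetical helper rather than any one of the functions above:

	/* Before: private status codes the caller had to translate */
	static enum ice_status ice_example_check(struct ice_ctl_q_info *cq)
	{
		if (!cq->sq.count)
			return ICE_ERR_AQ_EMPTY;
		return 0;
	}

	/* After: plain int carrying a standard errno value */
	static int ice_example_check(struct ice_ctl_q_info *cq)
	{
		if (!cq->sq.count)
			return -EIO;
		return 0;
	}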
*/ -static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type, + bool unloading) { struct ice_ctl_q_info *cq; @@ -694,7 +697,7 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) case ICE_CTL_Q_ADMIN: cq = &hw->adminq; if (ice_check_sq_alive(hw, cq)) - ice_aq_q_shutdown(hw, true); + ice_aq_q_shutdown(hw, unloading); break; case ICE_CTL_Q_SB: cq = &hw->sbq; @@ -713,20 +716,21 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) /** * ice_shutdown_all_ctrlq - shutdown routine for all control queues * @hw: pointer to the hardware structure + * @unloading: is the driver unloading itself * * NOTE: this function does not destroy the control queue locks. The driver * may call this at runtime to shutdown and later restart control queues, such * as in response to a reset event. */ -void ice_shutdown_all_ctrlq(struct ice_hw *hw) +void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading) { /* Shutdown FW admin queue */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading); /* Shutdown PHY Sideband */ if (ice_is_sbq_supported(hw)) - ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading); /* Shutdown PF-VF Mailbox */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading); } /** @@ -742,10 +746,10 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw) * * NOTE: this function does not initialize the controlq locks. */ -enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) +int ice_init_all_ctrlq(struct ice_hw *hw) { - enum ice_status status; u32 retry = 0; + int status; /* Init FW admin queue */ do { @@ -754,11 +758,11 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) return status; status = ice_init_check_adminq(hw); - if (status != ICE_ERR_AQ_FW_CRITICAL) + if (status != -EIO) break; ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true); msleep(ICE_CTL_Q_ADMIN_INIT_MSEC); } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT); @@ -805,7 +809,7 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) * driver needs to re-initialize control queues at run time it should call * ice_init_all_ctrlq instead. */ -enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) +int ice_create_all_ctrlq(struct ice_hw *hw) { ice_init_ctrlq_locks(&hw->adminq); if (ice_is_sbq_supported(hw)) @@ -839,7 +843,7 @@ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) void ice_destroy_all_ctrlq(struct ice_hw *hw) { /* shut down all the control queues first */ - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, true); ice_destroy_ctrlq_locks(&hw->adminq); if (ice_is_sbq_supported(hw)) @@ -956,7 +960,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) * This is the main send command routine for the ATQ. It runs the queue, * cleans the queue, etc. 
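The new unloading flag threaded through ice_shutdown_ctrlq() and ice_shutdown_all_ctrlq() above is forwarded to ice_aq_q_shutdown() for the admin queue, so firmware is only told the driver is going away when that is actually true. A sketch of the two flavors a caller would use; which non-unload paths pass false is an assumption here, since only the true call sites are visible in these hunks:

	/* driver remove: the driver really is unloading */
	ice_shutdown_all_ctrlq(hw, true);

	/* runtime teardown (e.g. around a reset, where the queues will be
	 * re-initialized later): presumably passes false
	 */
	ice_shutdown_all_ctrlq(hw, false);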
*/ -enum ice_status +int ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -964,26 +968,26 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_dma_mem *dma_buf = NULL; struct ice_aq_desc *desc_on_ring; bool cmd_completed = false; - enum ice_status status = 0; struct ice_sq_cd *details; u32 total_delay = 0; + int status = 0; u16 retval = 0; u32 val = 0; /* if reset is in progress return a soft error */ if (hw->reset_ongoing) - return ICE_ERR_RESET_ONGOING; + return -EBUSY; cq->sq_last_status = ICE_AQ_RC_OK; if (!cq->sq.count) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n"); - status = ICE_ERR_AQ_EMPTY; + status = -EIO; goto sq_send_command_error; } if ((buf && !buf_size) || (!buf && buf_size)) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto sq_send_command_error; } @@ -991,7 +995,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (buf_size > cq->sq_buf_size) { ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n", buf_size); - status = ICE_ERR_INVAL_SIZE; + status = -EINVAL; goto sq_send_command_error; } @@ -1004,7 +1008,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (val >= cq->num_sq_entries) { ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n", val); - status = ICE_ERR_AQ_EMPTY; + status = -EIO; goto sq_send_command_error; } @@ -1021,7 +1025,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq, */ if (ice_clean_sq(hw, cq) == 0) { ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n"); - status = ICE_ERR_AQ_FULL; + status = -ENOSPC; goto sq_send_command_error; } @@ -1075,7 +1079,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (copy_size > buf_size) { ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n", copy_size, buf_size); - status = ICE_ERR_AQ_ERROR; + status = -EIO; } else { memcpy(buf, dma_buf->va, copy_size); } @@ -1091,7 +1095,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq, } cmd_completed = true; if (!status && retval != ICE_AQ_RC_OK) - status = ICE_ERR_AQ_ERROR; + status = -EIO; cq->sq_last_status = (enum ice_aq_err)retval; } @@ -1109,10 +1113,10 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask || rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) { ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n"); - status = ICE_ERR_AQ_FW_CRITICAL; + status = -EIO; } else { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n"); - status = ICE_ERR_AQ_TIMEOUT; + status = -EIO; } } @@ -1132,16 +1136,16 @@ sq_send_command_error: * This is the main send command routine for the ATQ. It runs the queue, * cleans the queue, etc. */ -enum ice_status +int ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { - enum ice_status status = 0; + int status = 0; /* if reset is in progress return a soft error */ if (hw->reset_ongoing) - return ICE_ERR_RESET_ONGOING; + return -EBUSY; mutex_lock(&cq->sq_lock); status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd); @@ -1176,15 +1180,15 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) * the contents through e. It can also return how many events are * left to process through 'pending'. 
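With the conversion above, ice_sq_send_cmd() reports a reset in progress as -EBUSY and firmware or timeout failures as -EIO, while the exact AQ return code is still saved in cq->sq_last_status. A short usage sketch of the new calling convention; the opcode chosen here is illustrative only:

	struct ice_aq_desc desc;
	int err;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
	err = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
	if (err == -EBUSY) {
		/* a reset is ongoing; retry after it completes */
	} else if (err) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "AQ cmd failed, FW rc %d\n",
			  hw->adminq.sq_last_status);
	}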
*/ -enum ice_status +int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending) { u16 ntc = cq->rq.next_to_clean; enum ice_aq_err rq_last_status; - enum ice_status ret_code = 0; struct ice_aq_desc *desc; struct ice_dma_mem *bi; + int ret_code = 0; u16 desc_idx; u16 datalen; u16 flags; @@ -1198,7 +1202,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (!cq->rq.count) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n"); - ret_code = ICE_ERR_AQ_EMPTY; + ret_code = -EIO; goto clean_rq_elem_err; } @@ -1207,7 +1211,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - ret_code = ICE_ERR_AQ_NO_WORK; + ret_code = -EALREADY; goto clean_rq_elem_out; } @@ -1218,7 +1222,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); flags = le16_to_cpu(desc->flags); if (flags & ICE_AQ_FLAG_ERR) { - ret_code = ICE_ERR_AQ_ERROR; + ret_code = -EIO; ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", le16_to_cpu(desc->opcode), rq_last_status); } @@ -1232,7 +1236,6 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size); - /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message size */ diff --git a/drivers/thirdparty/ice/ice_controlq.h b/drivers/thirdparty/ice/ice_controlq.h index 4ab82a4cf43c..85a5987441b2 100644 --- a/drivers/thirdparty/ice/ice_controlq.h +++ b/drivers/thirdparty/ice/ice_controlq.h @@ -6,7 +6,6 @@ #include "ice_adminq_cmd.h" - /* Maximum buffer lengths for all control queue types */ #define ICE_AQ_MAX_BUF_LEN 4096 #define ICE_MBXQ_MAX_BUF_LEN 4096 diff --git a/drivers/thirdparty/ice/ice_dcb.c b/drivers/thirdparty/ice/ice_dcb.c index 35b2b6c60262..ae73d1967c8c 100644 --- a/drivers/thirdparty/ice/ice_dcb.c +++ b/drivers/thirdparty/ice/ice_dcb.c @@ -18,19 +18,19 @@ * * Requests the complete LLDP MIB (entire packet). (0x0A00) */ -enum ice_status +int ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, u16 buf_size, u16 *local_len, u16 *remote_len, struct ice_sq_cd *cd) { struct ice_aqc_lldp_get_mib *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.lldp_get_mib; if (buf_size == 0 || !buf) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib); @@ -60,7 +60,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes (0x0A01) */ -static enum ice_status +static int ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, struct ice_sq_cd *cd) { @@ -73,6 +73,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, if (!ena_update) cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS; + else + cmd->command |= ICE_AQ_LLDP_MIB_PENDING_ENABLE << + ICE_AQ_LLDP_MIB_PENDING_S; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -99,17 +102,17 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, * Delete the specified TLV from LLDP Local MIB for the given bridge type. * The firmware places the entire LLDP MIB in the response buffer. 
(0x0A04) */ -enum ice_status +int ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv, void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len, struct ice_sq_cd *cd) { struct ice_aqc_lldp_add_delete_tlv *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (tlv_len == 0) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.lldp_add_delete_tlv; @@ -148,19 +151,19 @@ ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv, * Firmware will place the complete LLDP MIB in response buffer with the * updated TLV. (0x0A03) */ -enum ice_status +int ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf, u16 buf_size, u16 old_len, u16 new_len, u16 offset, u16 *mib_len, struct ice_sq_cd *cd) { struct ice_aqc_lldp_update_tlv *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.lldp_update_tlv; if (offset == 0 || old_len == 0 || new_len == 0) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_update_tlv); @@ -190,7 +193,7 @@ ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf, * * Stop or Shutdown the embedded LLDP Agent (0x0A05) */ -enum ice_status +int ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, struct ice_sq_cd *cd) { @@ -218,7 +221,7 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, * * Start the embedded LLDP Agent on all ports. (0x0A06) */ -enum ice_status +int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd) { struct ice_aqc_lldp_start *cmd; @@ -699,18 +702,18 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) * * Parse DCB configuration from the LLDPDU */ -static enum ice_status +static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) { struct ice_lldp_org_tlv *tlv; - enum ice_status ret = 0; u16 offset = 0; + int ret = 0; u16 typelen; u16 type; u16 len; if (!lldpmib || !dcbcfg) - return ICE_ERR_PARAM; + return -EINVAL; /* set to the start of LLDPDU */ lldpmib += ETH_HLEN; @@ -750,17 +753,17 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) * * Query DCB configuration from the firmware */ -enum ice_status +int ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg) { - enum ice_status ret; u8 *lldpmib; + int ret; /* Allocate the LLDPDU */ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); if (!lldpmib) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib, ICE_LLDPDU_SIZE, NULL, NULL, NULL); @@ -785,13 +788,13 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, * This sends out request/release to ignore PFC condition for a TC. * It will return the TCs for which PFC is currently ignored. (0x0301) */ -enum ice_status +int ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret, struct ice_sq_cd *cd) { struct ice_aqc_pfc_ignore *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.pfc_ignore; @@ -821,17 +824,17 @@ ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret, * @cd: pointer to command details structure or NULL * * Start/Stop the embedded dcbx Agent. 
In case that this wrapper function - * returns ICE_SUCCESS, caller will need to check if FW returns back the same + * returns 0, caller will need to check if FW returns back the same * value as stated in dcbx_agent_status, and react accordingly. (0x0A09) */ -enum ice_status +int ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd) { struct ice_aqc_lldp_stop_start_specific_agent *cmd; - enum ice_status status; + enum ice_adminq_opc opcode; struct ice_aq_desc desc; - u16 opcode; + int status; cmd = &desc.params.lldp_agent_ctrl; @@ -861,7 +864,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, * * Get CEE DCBX mode operational configuration from firmware (0x0A07) */ -static enum ice_status +static int ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, struct ice_aqc_get_cee_dcb_cfg_resp *buff, struct ice_sq_cd *cd) @@ -882,12 +885,12 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, * This will return an indication if DSCP-based PFC or VLAN-based PFC * is enabled. (0x0302) */ -enum ice_status +int ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd) { struct ice_aqc_set_query_pfc_mode *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.set_query_pfc_mode; @@ -910,15 +913,15 @@ ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd) * This AQ call configures the PFC mdoe to DSCP-based PFC mode or VLAN * -based PFC (0x0303) */ -enum ice_status +int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) { struct ice_aqc_set_query_pfc_mode *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.set_query_pfc_mode; @@ -930,14 +933,13 @@ ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) if (status) return status; - /* The spec isn't clear about whether the FW will return an error code - * if the PFC mode requested by the driver was not set. The spec just - * says that the FW will write the PFC mode set back into cmd->pfc_mode, - * so after the AQ has been executed, check if cmd->pfc_mode is what was - * requested. + /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is + * disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has + * been executed, check if cmd->pfc_mode is what was requested. If not, + * return an error. */ if (cmd->pfc_mode != pfc_mode) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; return 0; } @@ -951,7 +953,7 @@ ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) * This AQ command will tell FW if it will apply or not apply the default DCB * configuration when link up (0x0306). 
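ice_aq_set_pfc_mode() above now checks the mode firmware writes back and returns -EOPNOTSUPP when the requested mode did not take effect (FW writes back 0 when DCB is disabled). A caller switching the port to DSCP-based PFC could therefore look roughly like the sketch below; the log wording is illustrative:

	int err;

	err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC, NULL);
	if (err == -EOPNOTSUPP)
		dev_info(ice_pf_to_dev(pf),
			 "FW rejected DSCP PFC mode (DCB disabled?)\n");
	else if (err)
		dev_err(ice_pf_to_dev(pf), "Set PFC mode failed: %d\n", err);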
*/ -enum ice_status +int ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable, struct ice_sq_cd *cd) { @@ -981,8 +983,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, struct ice_port_info *pi) { u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); - u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift; - u8 i, j, err, sync, oper, app_index, ice_app_sel_type; + u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j; + u8 i, err, sync, oper, app_index, ice_app_sel_type; u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift; struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg; @@ -1079,8 +1081,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, */ if (!err && sync && oper) { dcbcfg->app[app_index].priority = - (app_prio & ice_aqc_cee_app_mask) >> - ice_aqc_cee_app_shift; + (u8)((app_prio & ice_aqc_cee_app_mask) >> + ice_aqc_cee_app_shift); dcbcfg->app[app_index].selector = ice_app_sel_type; dcbcfg->app[app_index].prot_id = ice_app_prot_id_type; app_index++; @@ -1097,14 +1099,14 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, * * Get IEEE or CEE mode DCB configuration from the Firmware */ -static enum ice_status +static int ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode) { struct ice_dcbx_cfg *dcbx_cfg = NULL; - enum ice_status ret; + int ret; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; if (dcbx_mode == ICE_DCBX_MODE_IEEE) dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; @@ -1137,14 +1139,14 @@ out: * * Get DCB configuration from the Firmware */ -enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) +int ice_get_dcb_cfg(struct ice_port_info *pi) { struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg; struct ice_dcbx_cfg *dcbx_cfg; - enum ice_status ret; + int ret; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL); if (!ret) { @@ -1161,6 +1163,43 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) return ret; } +/** + * ice_get_dcb_cfg_from_mib_change + * @pi: port information structure + * @event: pointer to the admin queue receive event + * + * Set DCB configuration from received MIB Change event + */ +void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, + struct ice_rq_event_info *event) +{ + struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; + struct ice_aqc_lldp_get_mib *mib; + u8 change_type, dcbx_mode; + + mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; + + change_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M; + if (change_type == ICE_AQ_LLDP_MIB_REMOTE) + dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg; + + dcbx_mode = ((mib->type & ICE_AQ_LLDP_DCBX_M) >> + ICE_AQ_LLDP_DCBX_S); + + switch (dcbx_mode) { + case ICE_AQ_LLDP_DCBX_IEEE: + dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE; + ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg); + break; + + case ICE_AQ_LLDP_DCBX_CEE: + pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg; + ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *) + event->msg_buf, pi); + break; + } +} + /** * ice_init_dcb * @hw: pointer to the HW struct @@ -1168,13 +1207,13 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) * * Update DCB configuration from the Firmware */ -enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) +int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) { struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg; - enum ice_status ret = 0; + int ret = 0; if (!hw->func_caps.common_cap.dcb) - return 
ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; qos_cfg->is_sw_lldp = true; @@ -1190,7 +1229,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) return ret; qos_cfg->is_sw_lldp = false; } else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) { - return ICE_ERR_NOT_READY; + return -EBUSY; } /* Configure the LLDP MIB change event */ @@ -1210,19 +1249,19 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) * * Configure (disable/enable) MIB */ -enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) +int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) { struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg; - enum ice_status ret; + int ret; if (!hw->func_caps.common_cap.dcb) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; /* Get DCBX status */ qos_cfg->dcbx_status = ice_get_dcbx_status(hw); if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) - return ICE_ERR_NOT_READY; + return -EBUSY; ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL); if (!ret) @@ -1570,7 +1609,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) tlv->ouisubtype = htonl(ouisubtype); buf[0] = dcbcfg->pfc.pfccap & 0xF; - buf[1] = dcbcfg->pfc.pfcena & 0xF; + buf[1] = dcbcfg->pfc.pfcena; } /** @@ -1663,16 +1702,16 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg) * * Set DCB configuration to the Firmware */ -enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) +int ice_set_dcb_cfg(struct ice_port_info *pi) { u8 mib_type, *lldpmib = NULL; struct ice_dcbx_cfg *dcbcfg; - enum ice_status ret; struct ice_hw *hw; u16 miblen; + int ret; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; @@ -1681,7 +1720,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) /* Allocate the LLDPDU */ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); if (!lldpmib) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING) @@ -1705,20 +1744,21 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) * * query current port ETS configuration */ -static enum ice_status +static int ice_aq_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_query_port_ets *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.port_ets; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets); - cmd->port_teid = pi->root->info.node_teid; + if (pi->root) + cmd->port_teid = pi->root->info.node_teid; status = ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd); return status; @@ -1731,18 +1771,18 @@ ice_aq_query_port_ets(struct ice_port_info *pi, * * update the SW DB with the new TC changes */ -static enum ice_status +static int ice_update_port_tc_tree_cfg(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf) { struct ice_sched_node *node, *tc_node; struct ice_aqc_txsched_elem_data elem; - enum ice_status status = 0; u32 teid1, teid2; + int status = 0; u8 i, j; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; /* suspend the missing TC nodes */ for (i = 0; i < pi->root->num_children; i++) { teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid); @@ -1799,12 +1839,12 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi, * query current port ETS configuration and update the * SW DB with the TC changes */ -enum ice_status +int 
ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_aq_query_port_ets(pi, buf, buf_size, cd); diff --git a/drivers/thirdparty/ice/ice_dcb.h b/drivers/thirdparty/ice/ice_dcb.h index 370f673c5746..f63dd53534ea 100644 --- a/drivers/thirdparty/ice/ice_dcb.h +++ b/drivers/thirdparty/ice/ice_dcb.h @@ -6,6 +6,7 @@ #include "ice_type.h" #include "ice_common.h" +#include #define ICE_DCBX_STATUS_NOT_STARTED 0 #define ICE_DCBX_STATUS_IN_PROGRESS 1 @@ -144,51 +145,52 @@ struct ice_cee_app_prio { u8 prio_map; } __packed; - -enum ice_status +int ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, u16 buf_size, u16 *local_len, u16 *remote_len, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv, void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf, u16 buf_size, u16 old_len, u16 new_len, u16 offset, u16 *mib_len, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg); -enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi); -enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); -enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); -enum ice_status +int ice_get_dcb_cfg(struct ice_port_info *pi); +int ice_set_dcb_cfg(struct ice_port_info *pi); +void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, + struct ice_rq_event_info *event); +int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); +int ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cmd_details); #ifdef CONFIG_DCB -enum ice_status +int ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd); -enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); +int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); #else /* CONFIG_DCB */ -static inline enum ice_status +static inline int ice_aq_stop_lldp(struct ice_hw __always_unused *hw, bool __always_unused shutdown_lldp_agent, bool __always_unused persist, @@ -197,7 +199,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw, return 0; } -static inline enum ice_status +static inline int ice_aq_start_lldp(struct ice_hw __always_unused *hw, bool __always_unused persist, struct ice_sq_cd __always_unused *cd) @@ -205,7 +207,7 @@ ice_aq_start_lldp(struct ice_hw __always_unused *hw, return 0; } -static inline enum ice_status +static inline int ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw, bool __always_unused start_dcbx_agent, bool 
*dcbx_agent_status, @@ -216,7 +218,7 @@ ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw, return 0; } -static inline enum ice_status +static inline int ice_cfg_lldp_mib_change(struct ice_hw __always_unused *hw, bool __always_unused ena_mib) { diff --git a/drivers/thirdparty/ice/ice_dcb_lib.c b/drivers/thirdparty/ice/ice_dcb_lib.c index 8475e0a52925..bf116eb9a05b 100644 --- a/drivers/thirdparty/ice/ice_dcb_lib.c +++ b/drivers/thirdparty/ice/ice_dcb_lib.c @@ -182,15 +182,16 @@ void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi) switch (vsi->type) { case ICE_VSI_PF: + case ICE_VSI_VF: vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg); vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); break; #ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO case ICE_VSI_CHNL: #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS case ICE_VSI_OFFLOAD_MACVLAN: -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ case ICE_VSI_VMDQ2: case ICE_VSI_SWITCHDEV_CTRL: vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi)); @@ -222,8 +223,7 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index) */ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { - u16 qoffset; - u16 qcount; + u16 qoffset, qcount; int i, n; if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { @@ -252,7 +252,7 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) vsi->rx_rings[i]->dcb_tc = n; } -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS /* when DCB is configured TC for MACVLAN queues should be * the first drop TC of the main VSI */ @@ -264,7 +264,7 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) ice_for_each_alloc_rxq(vsi, i) vsi->rx_rings[i]->dcb_tc = first_droptc; } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO if (vsi->type == ICE_VSI_PF) { @@ -288,30 +288,6 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ } -/** - * ice_peer_prep_tc_change - Pre-notify RDMA Peer in blocking call of TC change - * @peer_obj_int: ptr to peer device internal struct - * @data: ptr to opaque data - */ -static int -ice_peer_prep_tc_change(struct ice_peer_obj_int *peer_obj_int, - void __always_unused *data) -{ - struct ice_peer_obj *peer_obj; - - peer_obj = ice_get_peer_obj(peer_obj_int); - if (!ice_validate_peer_obj(peer_obj)) - return 0; - - if (!test_bit(ICE_PEER_OBJ_STATE_OPENED, peer_obj_int->state)) - return 0; - - if (peer_obj->peer_ops && peer_obj->peer_ops->prep_tc_change) - peer_obj->peer_ops->prep_tc_change(peer_obj); - - return 0; -} - /** * ice_dcb_ena_dis_vsi - disable certain VSIs for DCB config/reconfig * @pf: pointer to the PF instance @@ -352,7 +328,7 @@ static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked) /** * ice_dcb_bwchk - check if ETS bandwidth input parameters are correct - * @pf: pointer to PF struct + * @pf: pointer to the PF struct * @dcbcfg: pointer to DCB config structure */ int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg) @@ -380,8 +356,7 @@ int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg) if (!total_bw) { etscfg->tcbwtable[0] = ICE_TC_MAX_BW; } else if (total_bw != ICE_TC_MAX_BW) { - dev_err(ice_pf_to_dev(pf), - "Invalid config, total bandwidth must equal 100\n"); + dev_err(ice_pf_to_dev(pf), "Invalid config, total bandwidth must equal 100\n"); return -EINVAL; } @@ -400,6 +375,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) struct ice_dcbx_cfg *old_cfg, *curr_cfg; struct device *dev = 
ice_pf_to_dev(pf); int ret = ICE_DCB_NO_HW_CHG; + struct iidc_event *event; curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; @@ -430,8 +406,16 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) return -ENOMEM; dev_info(dev, "Commit DCB Configuration to the hardware\n"); - /* Notify capable peers about impending change to TCs */ - ice_for_each_peer(pf, NULL, ice_peer_prep_tc_change); + /* Notify capable aux drivers about impending change to TCs */ + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) { + kfree(old_cfg); + return -ENOMEM; + } + + set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type); + ice_send_event_to_auxs(pf, event); + kfree(event); /* avoid race conditions by holding the lock while disabling and * re-enabling the VSI @@ -558,7 +542,7 @@ void ice_dcb_rebuild(struct ice_pf *pf) struct ice_aqc_port_ets_elem buf = { 0 }; struct device *dev = ice_pf_to_dev(pf); struct ice_dcbx_cfg *err_cfg; - enum ice_status ret; + int ret; ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { @@ -738,7 +722,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf) /* Configure SW DCB default with ETS non-willing */ ret = ice_dcb_sw_dflt_cfg(pf, false, true); if (ret) { - dev_err(dev, "Failed to set local DCB config %d\n", ret); + ice_dev_err_errno(dev, ret, "Failed to set local DCB config"); return ret; } @@ -762,10 +746,11 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf) void ice_pf_dcb_recfg(struct ice_pf *pf) { struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + struct iidc_core_dev_info *cdev_info; + struct iidc_event *event; u8 tc_map = 0; int v, ret; - /* Update each VSI */ ice_for_each_vsi(pf, v) { struct ice_vsi *vsi = pf->vsi[v]; @@ -774,6 +759,8 @@ void ice_pf_dcb_recfg(struct ice_pf *pf) continue; if (vsi->type == ICE_VSI_PF) { + if (ice_dcb_get_num_tc(dcbcfg) > vsi->alloc_txq) + dev_warn(ice_pf_to_dev(vsi->back), "More TCs defined than queues/rings allocated.\n"); tc_map = ice_dcb_get_ena_tc(dcbcfg); /* If DCBX request non-contiguous TC, then configure @@ -783,11 +770,11 @@ void ice_pf_dcb_recfg(struct ice_pf *pf) tc_map = ICE_DFLT_TRAFFIC_CLASS; ice_dcb_noncontig_cfg(pf); } -#if defined(HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO) && defined(HAVE_NETDEV_SB_DEV) +#if defined(HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO) && defined(HAVE_NDO_DFWD_OPS) } else if (vsi->type == ICE_VSI_CHNL || vsi->type == ICE_VSI_OFFLOAD_MACVLAN) { tc_map = BIT(ice_get_first_droptc(vsi)); -# endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO && HAVE_NETDEV_SB_DEV */ +# endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO && HAVE_NDO_DFWD_OPS */ } else { tc_map = ICE_DFLT_TRAFFIC_CLASS; } @@ -798,20 +785,32 @@ void ice_pf_dcb_recfg(struct ice_pf *pf) vsi->idx); continue; } - /* no need to proceed with remaining cfg if it is CHNL VSI */ - if (vsi->type == ICE_VSI_CHNL) + /* no need to proceed with remaining cfg if it is CHNL + * or switchdev VSI + */ + if (vsi->type == ICE_VSI_CHNL || + vsi->type == ICE_VSI_SWITCHDEV_CTRL) continue; ice_vsi_map_rings_to_vectors(vsi); if (vsi->type == ICE_VSI_PF) ice_dcbnl_set_all(vsi); } - /* If the RDMA peer is registered, update that peer's initial_qos_info struct. - * The peer is closed during this process, so when it is opened, it will access - * the initial_qos_info element to configure itself. 
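The per-peer prep_tc_change callback removed above is replaced by IIDC events broadcast to the auxiliary (RDMA) drivers: one event before the TC change and one after it, with the refreshed QoS parameters attached to the latter. Condensed from the hunks above and below, with error handling trimmed:

	struct iidc_core_dev_info *cdev_info;
	struct iidc_event *event;

	/* pre-notify aux drivers before TCs are reconfigured */
	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;
	set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
	ice_send_event_to_auxs(pf, event);
	kfree(event);

	/* ... apply the new DCB/TC configuration ... */

	/* publish the resulting QoS state once the change is done */
	cdev_info = ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID);
	if (cdev_info) {
		ice_setup_dcb_qos_info(pf, &cdev_info->qos_info);
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;
		set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
		event->info.port_qos = cdev_info->qos_info;
		ice_send_event_to_auxs(pf, event);
		kfree(event);
	}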
+ /* Notify the aux drivers that TC change is finished */ - if (pf->rdma_peer) - ice_setup_dcb_qos_info(pf, &pf->rdma_peer->initial_qos_info); + cdev_info = ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID); + if (cdev_info) { + ice_setup_dcb_qos_info(pf, &cdev_info->qos_info); + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return; + + set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type); + event->info.port_qos = cdev_info->qos_info; + ice_send_event_to_auxs(pf, event); + kfree(event); + } } /** @@ -824,16 +823,13 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) struct device *dev = ice_pf_to_dev(pf); struct ice_port_info *port_info; struct ice_hw *hw = &pf->hw; - enum ice_status status; int err; port_info = hw->port_info; - status = ice_init_dcb(hw, false); - if (status && !port_info->qos_cfg.is_sw_lldp) { - dev_err(dev, "Error initializing DCB %s\n", - ice_stat_str(status)); - err = ice_status_to_errno(status); + err = ice_init_dcb(hw, false); + if (err && !port_info->qos_cfg.is_sw_lldp) { + dev_err(dev, "Error initializing DCB %d\n", err); goto dcb_init_err; } @@ -848,12 +844,12 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC, NULL); if (err) - dev_info(dev, "Fail to set VLAN PFC mode\n"); + dev_info(dev, "Failed to set VLAN PFC mode\n"); err = ice_dcb_sw_dflt_cfg(pf, true, locked); if (err) { - dev_err(dev, "Failed to set local DCB config %d\n", - err); + ice_dev_err_errno(dev, err, + "Failed to set local DCB config"); err = -EIO; goto dcb_init_err; } @@ -883,7 +879,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) if (err) goto dcb_init_err; - return err; + return 0; dcb_init_err: dev_err(dev, "DCB init failed\n"); @@ -969,19 +965,28 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, * @pf: ptr to ice_pf * @qos_info: QoS param instance */ -void ice_setup_dcb_qos_info(struct ice_pf *pf, struct ice_qos_params *qos_info) +void ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_qos_params *qos_info) { + struct iidc_core_dev_info *cdev_info; struct ice_dcbx_cfg *dcbx_cfg; unsigned int i; u32 up2tc; + if (!pf || !qos_info) + return; + + cdev_info = ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID); + + if (!cdev_info) + return; + dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); qos_info->num_apps = dcbx_cfg->numapps; qos_info->num_tc = ice_dcb_get_num_tc(dcbx_cfg); - for (i = 0; i < ICE_IDC_MAX_USER_PRIORITY; i++) + for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++) qos_info->up2tc[i] = (up2tc >> (i * 3)) & 0x7; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) @@ -995,10 +1000,20 @@ void ice_setup_dcb_qos_info(struct ice_pf *pf, struct ice_qos_params *qos_info) } qos_info->pfc_mode = dcbx_cfg->pfc_mode; - for (i = 0; i < ICE_IDC_DSCP_NUM_VAL; i++) + for (i = 0; i < ICE_DSCP_NUM_VAL; i++) qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i]; } +/** + * ice_dcb_is_mib_change_pending - Check if MIB change is pending + * @state: MIB change state + */ +static bool ice_dcb_is_mib_change_pending(u8 state) +{ + return ICE_AQ_LLDP_MIB_CHANGE_PENDING == + FIELD_GET(ICE_AQ_LLDP_MIB_CHANGE_STATE_M, state); +} + /** * ice_dcb_process_lldp_set_mib_change - Process MIB change * @pf: ptr to ice_pf @@ -1010,11 +1025,14 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, { struct ice_aqc_port_ets_elem buf = { 0 }; struct device *dev = ice_pf_to_dev(pf); + struct iidc_core_dev_info *cdev_info; struct ice_aqc_lldp_get_mib *mib; struct ice_dcbx_cfg tmp_dcbx_cfg; + bool pending_handled = 
true; bool need_reconfig = false; struct ice_port_info *pi; u8 mib_type; + u32 numtc; int ret; /* Not DCB capable or capability disabled */ @@ -1028,41 +1046,58 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, pi = pf->hw.port_info; mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; + /* Ignore if event is not for Nearest Bridge */ - mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) & - ICE_AQ_LLDP_BRID_TYPE_M); + mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type); dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type); if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID) return; + /* A pending change event contains accurate config information, and + * the FW setting has not been updaed yet, so detect if change is + * pending to determine where to pull config information from + * (FW vs event) + */ + if (ice_dcb_is_mib_change_pending(mib->state)) + pending_handled = false; + /* Check MIB Type and return if event for Remote MIB update */ - mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M; + mib_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type); dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local"); if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ - ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, - ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, - &pi->qos_cfg.remote_dcbx_cfg); - if (ret) { - dev_err(dev, "Failed to get remote DCB config\n"); - return; + if (!pending_handled) { + ice_get_dcb_cfg_from_mib_change(pi, event); + } else { + ret = + ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, + ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, + &pi->qos_cfg.remote_dcbx_cfg); + if (ret) + dev_dbg(dev, "Failed to get remote DCB config\n"); } + return; } + /* That a DCB change has happened is now determined */ mutex_lock(&pf->tc_mutex); /* store the old configuration */ - tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg; + tmp_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg; /* Reset the old DCBX configuration data */ memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(pi->qos_cfg.local_dcbx_cfg)); /* Get updated DCBX data from firmware */ - ret = ice_get_dcb_cfg(pf->hw.port_info); - if (ret) { - dev_err(dev, "Failed to get DCB config\n"); - goto out; + if (!pending_handled) { + ice_get_dcb_cfg_from_mib_change(pi, event); + } else { + ret = ice_get_dcb_cfg(pi); + if (ret) { + dev_err(dev, "Failed to get DCB config\n"); + goto out; + } } /* No change detected in DCBX configs */ @@ -1081,7 +1116,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, goto out; /* Enable DCB tagging only when more than one TC */ - if (ice_dcb_get_num_tc(&pi->qos_cfg.local_dcbx_cfg) > 1) { + numtc = ice_dcb_get_num_tc(&pi->qos_cfg.local_dcbx_cfg); + if (numtc > 1) { dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n"); set_bit(ICE_FLAG_DCB_ENA, pf->flags); } else { @@ -1089,11 +1125,36 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, clear_bit(ICE_FLAG_DCB_ENA, pf->flags); } + if (numtc > pf->hw.func_caps.common_cap.maxtc) + dev_warn(dev, "%d TCs more than supported max of %d\n", numtc, + pf->hw.func_caps.common_cap.maxtc); + + cdev_info = ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID); + if (cdev_info) { + struct iidc_event *ievent; + + /* can't fail the LAN flow based on a failure to notify + * the RDMA driver + */ + ievent = kzalloc(sizeof(*ievent), GFP_KERNEL); + if (ievent) { + set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, ievent->type); + ice_send_event_to_auxs(pf, ievent); + kfree(ievent); + } + } + + /* Send Execute Pending MIB Change event 
if it is a Pending event */ + if (!pending_handled) { + ice_lldp_execute_pending_mib(&pf->hw); + pending_handled = true; + } + rtnl_lock(); /* disable VSIs affected by DCB changes */ ice_dcb_ena_dis_vsi(pf, false, true); - ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); + ret = ice_query_port_ets(pi, &buf, sizeof(buf), NULL); if (ret) { dev_err(dev, "Query Port ETS failed\n"); goto unlock_rtnl; @@ -1108,4 +1169,8 @@ unlock_rtnl: rtnl_unlock(); out: mutex_unlock(&pf->tc_mutex); + + /* Send Execute Pending MIB Change event if it is a Pending event */ + if (!pending_handled) + ice_lldp_execute_pending_mib(&pf->hw); } diff --git a/drivers/thirdparty/ice/ice_dcb_lib.h b/drivers/thirdparty/ice/ice_dcb_lib.h index 971a8561415a..0c8610ca7a45 100644 --- a/drivers/thirdparty/ice/ice_dcb_lib.h +++ b/drivers/thirdparty/ice/ice_dcb_lib.h @@ -31,7 +31,8 @@ void ice_update_dcb_stats(struct ice_pf *pf); void ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, struct ice_tx_buf *first); -void ice_setup_dcb_qos_info(struct ice_pf *pf, struct ice_qos_params *qos_info); +void +ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_qos_params *qos_info); void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event); @@ -124,16 +125,19 @@ ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf, static inline u8 ice_get_pfc_mode(struct ice_pf *pf) { - return -EOPNOTSUPP; + return 0; } static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { } static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { } static inline void ice_update_dcb_stats(struct ice_pf *pf) { } -static inline void ice_setup_dcb_qos_info(struct ice_pf *pf, struct ice_qos_params *qos_info) { } -static inline -void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { } -static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { } +static inline void +ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_qos_params *qos_info) { } +static inline void +ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, + struct ice_rq_event_info *event) { } +static inline void +ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { } #endif /* CONFIG_DCB */ #endif /* _ICE_DCB_LIB_H_ */ diff --git a/drivers/thirdparty/ice/ice_dcb_nl.c b/drivers/thirdparty/ice/ice_dcb_nl.c index 4e7acd02f446..8fbbf2ebcfd9 100644 --- a/drivers/thirdparty/ice/ice_dcb_nl.c +++ b/drivers/thirdparty/ice/ice_dcb_nl.c @@ -66,6 +66,11 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) int bwcfg = 0, bwrec = 0; int err, i; +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag->bonded) + return -EINVAL; +#endif /* HAVE_NETDEV_UPPER_INFO */ + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; @@ -104,6 +109,12 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc; + /* Not all TCs can have a BW of zero, FW requires at least one TC + * with BW assigned, and sum of all has to be 100%. 
Set TC0 to 100% + */ + if (!bwcfg) + new_cfg->etscfg.tcbwtable[0] = 100; + if (!bwrec) new_cfg->etsrec.tcbwtable[0] = 100; @@ -160,6 +171,11 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_qos_cfg *qos_cfg; +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag->bonded) + return ICE_DCB_NO_HW_CHG; + +#endif /* HAVE_NETDEV_UPPER_INFO */ /* if FW LLDP agent is running, DCBNL not allowed to change mode */ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) return ICE_DCB_NO_HW_CHG; @@ -174,15 +190,17 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) if (mode == pf->dcbx_cap) return ICE_DCB_NO_HW_CHG; - pf->dcbx_cap = mode; qos_cfg = &pf->hw.port_info->qos_cfg; - if (mode & DCB_CAP_DCBX_VER_CEE) { - if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) - return ICE_DCB_NO_HW_CHG; + + /* DSCP configuration is not DCBx negotiated */ + if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) + return ICE_DCB_NO_HW_CHG; + + pf->dcbx_cap = mode; + if (mode & DCB_CAP_DCBX_VER_CEE) qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE; - } else { + else qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE; - } dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode); return ICE_DCB_HW_CHG_RST; @@ -258,6 +276,12 @@ static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc) struct ice_dcbx_cfg *new_cfg; int err; +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag->bonded) + return -EINVAL; + +#endif /* HAVE_NETDEV_UPPER_INFO */ + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; @@ -327,6 +351,12 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set) struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_dcbx_cfg *new_cfg; +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag->bonded) + return; + +#endif /* HAVE_NETDEV_UPPER_INFO */ + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return; @@ -386,6 +416,12 @@ static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state) { struct ice_pf *pf = ice_netdev_to_pf(netdev); +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag->bonded) + return ICE_DCB_NO_HW_CHG; + +#endif /* HAVE_NETDEV_UPPER_INFO */ + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return ICE_DCB_NO_HW_CHG; @@ -455,6 +491,12 @@ ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, struct ice_dcbx_cfg *new_cfg; int i; +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag->bonded) + return; + +#endif /* HAVE_NETDEV_UPPER_INFO */ + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return; @@ -509,6 +551,12 @@ ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct) struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_dcbx_cfg *new_cfg; +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag->bonded) + return; + +#endif /* HAVE_NETDEV_UPPER_INFO */ + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return; @@ -718,6 +766,12 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app) u8 max_tc; int ret; +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag->bonded) + return -EINVAL; + +#endif /* HAVE_NETDEV_UPPER_INFO */ + /* ONLY DSCP APP TLVs have operational significance */ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) return -EINVAL; @@ -754,7 +808,6 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app) return -EINVAL; 
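Every dcbnl handler touched in this file gains the same early guard: DCB changes are refused while the port is part of a bond. The guard only compiles on kernels that expose netdev upper info; the pattern, as added in each hunk above and below:

#ifdef HAVE_NETDEV_UPPER_INFO
	/* no DCB reconfiguration while this port is bonded (LAG) */
	if (pf->lag->bonded)
		return -EINVAL;	/* u8-returning ops use ICE_DCB_NO_HW_CHG */
#endif /* HAVE_NETDEV_UPPER_INFO */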
} - /* grab TC mutex */ mutex_lock(&pf->tc_mutex); @@ -854,6 +907,12 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) unsigned int i, j; int ret = 0; +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag->bonded) + return -EINVAL; + +#endif /* HAVE_NETDEV_UPPER_INFO */ + if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) { netdev_err(netdev, "can't delete DSCP netlink app when FW DCB agent is active\n"); return -EINVAL; @@ -964,6 +1023,12 @@ static u8 ice_dcbnl_cee_set_all(struct net_device *netdev) struct ice_dcbx_cfg *new_cfg; int err; +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag->bonded) + return ICE_DCB_NO_HW_CHG; + +#endif /* HAVE_NETDEV_UPPER_INFO */ + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return ICE_DCB_NO_HW_CHG; diff --git a/drivers/thirdparty/ice/ice_dcb_nl.h b/drivers/thirdparty/ice/ice_dcb_nl.h index 69bea270e372..40cb63778688 100644 --- a/drivers/thirdparty/ice/ice_dcb_nl.h +++ b/drivers/thirdparty/ice/ice_dcb_nl.h @@ -13,8 +13,8 @@ ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, #else static inline void ice_dcbnl_setup(struct ice_vsi *vsi) { } static inline void ice_dcbnl_set_all(struct ice_vsi *vsi) { } -static inline void ice_dcbnl_flush_apps(struct ice_pf *pf, - struct ice_dcbx_cfg *old_cfg, - struct ice_dcbx_cfg *new_cfg) { } +static inline void +ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, + struct ice_dcbx_cfg *new_cfg) { } #endif /* CONFIG_DCB */ #endif /* _ICE_DCB_NL_H_ */ diff --git a/drivers/thirdparty/ice/ice_dcf.c b/drivers/thirdparty/ice/ice_dcf.c index 7810116d9d3b..f7c6abce00db 100644 --- a/drivers/thirdparty/ice/ice_dcf.c +++ b/drivers/thirdparty/ice/ice_dcf.c @@ -54,6 +54,9 @@ static const enum ice_adminq_opc aqc_permitted_tbl[] = { ice_aqc_opc_query_acl_entry, ice_aqc_opc_query_acl_actpair, ice_aqc_opc_query_acl_counter, + + /* QoS */ + ice_aqc_opc_query_port_ets, }; /** @@ -117,6 +120,58 @@ bool ice_dcf_is_udp_tunnel_aq_cmd(struct ice_aq_desc *desc, u8 *aq_buf) return false; } +/** + * ice_is_vf_adq_enabled - Check if any VF has ADQ enabled + * @pf: pointer to the PF structure + * @vf_id: on true return, the first VF ID that we found had ADQ enabled + * + * Return true if any VF has ADQ enabled. Return false otherwise. + */ +static bool ice_is_vf_adq_enabled(struct ice_pf *pf, u16 *vf_id) +{ + bool adq_enabled = false; + struct ice_vf *vf; + unsigned int bkt; + + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { + if (vf->adq_enabled) { + *vf_id = vf->vf_id; + adq_enabled = true; + break; + } + } + rcu_read_unlock(); + + return adq_enabled; +} + +/** + * ice_vf_chnl_fltrs_enabled - Check if a VF has TC filters enabled + * @pf: pointer to the PF structure + * @vf_id: on true return, the first VF ID that we found had TC filters + * + * Return true if any VF has TC filters. Return false otherwise. 
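The VF scans added here (ice_is_vf_adq_enabled() above, ice_vf_chnl_fltrs_enabled() just below) replace open-coded index loops with RCU-protected iteration over the VF table: take rcu_read_lock(), walk with ice_for_each_vf_rcu(), record the match, break, unlock. The same shape works for any per-VF predicate; the helper below is hypothetical and only mirrors that structure:

	static bool ice_any_vf_trusted(struct ice_pf *pf, u16 *vf_id)
	{
		bool found = false;
		struct ice_vf *vf;
		unsigned int bkt;

		rcu_read_lock();
		ice_for_each_vf_rcu(pf, bkt, vf) {
			if (vf->trusted) {
				*vf_id = vf->vf_id;
				found = true;
				break;
			}
		}
		rcu_read_unlock();

		return found;
	}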
+ */ +static bool ice_vf_chnl_fltrs_enabled(struct ice_pf *pf, u16 *vf_id) +{ + bool chnl_fltrs_enabled = false; + struct ice_vf *vf; + unsigned int bkt; + + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { + if (vf->num_dmac_chnl_fltrs) { + *vf_id = vf->vf_id; + chnl_fltrs_enabled = true; + break; + } + } + rcu_read_unlock(); + + return chnl_fltrs_enabled; +} + /** * ice_check_dcf_allowed - check if DCF is allowed based on various checks * @vf: pointer to the VF to check @@ -130,25 +185,15 @@ bool ice_check_dcf_allowed(struct ice_vf *vf) dev = ice_pf_to_dev(pf); - if (vf->vf_id != ICE_DCF_VFID0 && vf->vf_id != ICE_DCF_VFID1) { - dev_err(dev, "VF %d requested DCF capability, but only VF %d and %d are allowed to request DCF capability\n", - vf->vf_id, ICE_DCF_VFID0, ICE_DCF_VFID1); + if (vf->vf_id != ICE_DCF_VFID) { + dev_err(dev, "VF %d requested DCF capability, but only VF %d is allowed to request DCF capability\n", + vf->vf_id, ICE_DCF_VFID); return false; } if (!vf->trusted) { -#ifdef HAVE_NDO_SET_VF_TRUST dev_err(dev, "VF needs to be trusted to configure DCF capability\n"); return false; -#else - - int ret; - ret = ice_set_vf_trust(ice_get_main_vsi(pf)->netdev, vf->vf_id, true); - if (ret) { - dev_err(dev, "Failed to set trusted VF to configure DCF capability.\n"); - return false; - } -#endif /* HAVE_NDO_SET_VF_TRUST */ } /* DCF and ADQ are mutually exclusive. */ @@ -158,12 +203,11 @@ bool ice_check_dcf_allowed(struct ice_vf *vf) return false; } #endif /* NETIF_F_HW_TC */ - ice_for_each_vf(pf, i) { - if (pf->vf[i].adq_enabled) { - dev_err(dev, "ADQ on VF %d is currently enabled. Device Control Functionality cannot be enabled.\n", - pf->vf[i].vf_id); - return false; - } + + if (ice_is_vf_adq_enabled(pf, &i)) { + dev_err(dev, "ADQ on VF %d is currently enabled. Device Control Functionality cannot be enabled.\n", + i); + return false; } #ifdef HAVE_TC_SETUP_CLSFLOWER @@ -172,20 +216,19 @@ bool ice_check_dcf_allowed(struct ice_vf *vf) return false; } #endif /* HAVE_TC_SETUP_CLSFLOWER */ - ice_for_each_vf(pf, i) { - if (pf->vf[i].num_dmac_chnl_fltrs) { - dev_err(dev, "TC filters on VF %d are currently in use. Device Control Functionality cannot be enabled.\n", - pf->vf[i].vf_id); - return false; - } + + if (ice_vf_chnl_fltrs_enabled(pf, &i)) { + dev_err(dev, "TC filters on VF %d are currently in use. Device Control Functionality cannot be enabled.\n", + i); + return false; } -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS if (ice_is_offloaded_macvlan_ena(pf)) { dev_err(dev, "L2 Forwarding Offload is currently enabled. 
Device Control Functionality cannot be enabled.\n"); return false; } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ sw = pf->hw.switch_info; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { @@ -268,7 +311,7 @@ ice_dcf_rm_sw_rule_to_vsi(struct ice_pf *pf, struct ice_dcf_sw_rule_entry *s_entry) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; + int status; s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); if (!s_rule) @@ -301,9 +344,9 @@ ice_dcf_rm_sw_rule_to_vsi_list(struct ice_pf *pf, struct ice_dcf_vsi_list_info *vsi_list_info = s_entry->vsi_list_info; struct ice_aqc_alloc_free_res_elem *res_buf; struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; u16 rule_sz; u16 vsi_id; + int status; int i = 0; if (!vsi_list_info) @@ -367,7 +410,7 @@ ice_dcf_rm_vsi_from_list(struct ice_pf *pf, u16 hw_vsi_id) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; + int status; if (!vsi_list_info || !vsi_list_info->vsi_count || !test_bit(hw_vsi_id, vsi_list_info->hw_vsi_map)) @@ -415,15 +458,15 @@ void ice_rm_dcf_sw_vsi_rule(struct ice_pf *pf, u16 hw_vsi_id) s_entry->vsi_list_info, hw_vsi_id); if (ret && ret != -ENOENT) - dev_err(ice_pf_to_dev(pf), - "Failed to remove VSI %u from VSI list : %d\n", - hw_vsi_id, ret); + ice_dev_err_errno(ice_pf_to_dev(pf), ret, + "Failed to remove VSI %u from VSI list", + hw_vsi_id); } else if (s_entry->fwd_id.hw_vsi_id == hw_vsi_id) { ret = ice_dcf_rm_sw_rule_to_vsi(pf, s_entry); if (ret) - dev_err(ice_pf_to_dev(pf), - "Failed to remove VSI %u switch rule : %d\n", - hw_vsi_id, ret); + ice_dev_err_errno(ice_pf_to_dev(pf), ret, + "Failed to remove VSI %u switch rule", + hw_vsi_id); } } @@ -455,16 +498,16 @@ void ice_rm_all_dcf_sw_rules(struct ice_pf *pf) rule_id = sw_rule->rule_id; ret = ice_dcf_rm_sw_rule_to_vsi_list(pf, sw_rule); if (ret) - dev_err(ice_pf_to_dev(pf), - "Failed to remove switch rule 0x%04x with list id %u : %d\n", - rule_id, list_id, ret); + ice_dev_err_errno(ice_pf_to_dev(pf), ret, + "Failed to remove switch rule 0x%04x with list id %u", + rule_id, list_id); } else { rule_id = sw_rule->rule_id; ret = ice_dcf_rm_sw_rule_to_vsi(pf, sw_rule); if (ret) - dev_err(ice_pf_to_dev(pf), - "Failed to remove switch rule 0x%04x : %d\n", - rule_id, ret); + ice_dev_err_errno(ice_pf_to_dev(pf), ret, + "Failed to remove switch rule 0x%04x", + rule_id); } /* clears rule filter management data if AdminQ command has error */ diff --git a/drivers/thirdparty/ice/ice_dcf.h b/drivers/thirdparty/ice/ice_dcf.h index 7d95ec56f5b3..2196f5faba31 100644 --- a/drivers/thirdparty/ice/ice_dcf.h +++ b/drivers/thirdparty/ice/ice_dcf.h @@ -6,9 +6,9 @@ struct ice_vf; struct ice_pf; +struct ice_hw; -#define ICE_DCF_VFID0 0 -#define ICE_DCF_VFID1 1 +#define ICE_DCF_VFID 0 /* DCF mode states */ enum ice_dcf_state { diff --git a/drivers/thirdparty/ice/ice_ddp.c b/drivers/thirdparty/ice/ice_ddp.c new file mode 100644 index 000000000000..428dc2ab2427 --- /dev/null +++ b/drivers/thirdparty/ice/ice_ddp.c @@ -0,0 +1,2586 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#include "ice_ddp.h" +#include "ice_type.h" +#include "ice_common.h" +#include "ice_sched.h" + +/** + * ice_aq_download_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer to transfer + * @buf_size: the size of the package buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Download Package (0x0C40) + */ +static int +ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + int status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == -EIO) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * ice_aq_upload_section + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer which will receive the section + * @buf_size: the size of the package buffer + * @cd: pointer to command details structure or NULL + * + * Upload Section (0x0C41) + */ +int +ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); +} + +/** + * ice_aq_update_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package cmd buffer + * @buf_size: the size of the package cmd buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Update Package (0x0C42) + */ +static int +ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, + bool last_buf, u32 *error_offset, u32 *error_info, + struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + int status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == -EIO) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * ice_find_seg_in_pkg + * @hw: pointer to the hardware structure + * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) + * @pkg_hdr: pointer to the package header to 
be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + */ +struct ice_generic_seg_hdr * +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr) +{ + u32 i; + + ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", + pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, + pkg_hdr->pkg_format_ver.update, + pkg_hdr->pkg_format_ver.draft); + + /* Search all package segments for the requested segment type */ + for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { + struct ice_generic_seg_hdr *seg; + + seg = (struct ice_generic_seg_hdr *) + ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i])); + + if (le32_to_cpu(seg->seg_type) == seg_type) + return seg; + } + + return NULL; +} + +/** + * ice_get_pkg_seg_by_idx + * @pkg_hdr: pointer to the package header to be searched + * @idx: index of segment + */ +static struct ice_generic_seg_hdr * +ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) +{ + struct ice_generic_seg_hdr *seg = NULL; + + if (idx < le32_to_cpu(pkg_hdr->seg_count)) + seg = (struct ice_generic_seg_hdr *) + ((u8 *)pkg_hdr + + le32_to_cpu(pkg_hdr->seg_offset[idx])); + + return seg; +} + +/** + * ice_is_signing_seg_at_idx - determine if segment is a signing segment + * @pkg_hdr: pointer to package header + * @idx: segment index + */ +static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) +{ + struct ice_generic_seg_hdr *seg; + bool retval = false; + + seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (seg) + retval = le32_to_cpu(seg->seg_type) == SEGMENT_TYPE_SIGNING; + + return retval; +} + +/** + * ice_is_signing_seg_type_at_idx + * @pkg_hdr: pointer to package header + * @idx: segment index + * @seg_id: segment id that is expected + * @sign_type: signing type + * + * Determine if a segment is a signing segment of the correct type + */ +static bool +ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx, + u32 seg_id, u32 sign_type) +{ + bool result = false; + + if (ice_is_signing_seg_at_idx(pkg_hdr, idx)) { + struct ice_sign_seg *seg; + + seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, + idx); + if (seg && le32_to_cpu(seg->seg_id) == seg_id && + le32_to_cpu(seg->sign_type) == sign_type) + result = true; + } + + return result; +} + +/** + * ice_update_pkg_no_lock + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + */ +int +ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + int status = 0; + u32 i; + + for (i = 0; i < count; i++) { + struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); + bool last = ((i + 1) == count); + u32 offset, info; + + status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end), + last, &offset, &info, NULL); + + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", + status, offset, info); + break; + } + } + + return status; +} + +/** + * ice_update_pkg + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains change lock and updates package. 
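/*
 * Illustrative usage sketch, not part of this patch: once hw->pkg_seg_id has
 * been derived from the MAC type, the device-specific ice segment can be
 * located in a package image with ice_find_seg_in_pkg().  The wrapper name
 * ice_locate_ice_seg() is hypothetical; "pkg" is assumed to point at a
 * complete, verified package image.
 */
static struct ice_seg *
ice_locate_ice_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg)
{
	struct ice_generic_seg_hdr *hdr;

	hdr = ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg);
	if (!hdr)
		return NULL;	/* no matching segment in this image */

	return (struct ice_seg *)hdr;
}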
+ */ +int +ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + int status; + + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + return status; + + status = ice_update_pkg_no_lock(hw, bufs, count); + + ice_release_change_lock(hw); + + return status; +} + +static enum ice_ddp_state +ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) +{ + switch (aq_err) { + case ICE_AQ_RC_ENOSEC: + return ICE_DDP_PKG_NO_SEC_MANIFEST; + case ICE_AQ_RC_EBADSIG: + return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; + case ICE_AQ_RC_ESVN: + return ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW; + case ICE_AQ_RC_EBADMAN: + return ICE_DDP_PKG_MANIFEST_INVALID; + case ICE_AQ_RC_EBADBUF: + return ICE_DDP_PKG_BUFFER_INVALID; + default: + return ICE_DDP_PKG_ERR; + } +} + +/** + * ice_is_buffer_metadata - determine if package buffer is a metadata buffer + * @buf: pointer to buffer header + */ +static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf) +{ + bool metadata = false; + + if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF) + metadata = true; + + return metadata; +} + +/** + * ice_is_last_download_buffer + * @buf: pointer to current buffer header + * @idx: index of the buffer in the current sequence + * @count: the buffer count in the current sequence + * + * Note: this routine should only be called if the buffer is not the last buffer + */ +static bool +ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count) +{ + bool last = ((idx + 1) == count); + + /* A set metadata flag in the next buffer will signal that the current + * buffer will be the last buffer downloaded + */ + if (!last) { + struct ice_buf *next_buf = ((struct ice_buf *)buf) + 1; + + last = ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf); + } + + return last; +} + +/** + * ice_dwnld_cfg_bufs_no_lock + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @start: buffer index of first buffer to download + * @count: the number of buffers to download + * @indicate_last: if true, then set last buffer flag on last buffer download + * + * Downloads package configuration buffers to the firmware. Metadata buffers + * are skipped, and the first metadata buffer found indicates that the rest + * of the buffers are all metadata buffers. + */ +static enum ice_ddp_state +ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start, + u32 count, bool indicate_last) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + struct ice_buf_hdr *bh; + enum ice_aq_err err; + u32 offset, info, i; + + if (!bufs || !count) + return ICE_DDP_PKG_ERR; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. 
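/*
 * Illustrative sketch, not part of this patch: the download sequence is
 * terminated by the first metadata buffer, so the number of buffers that will
 * actually be sent to firmware can be computed up front.  The helper name
 * ice_count_dwnld_bufs() is hypothetical and only spells out that convention.
 */
static u32 ice_count_dwnld_bufs(struct ice_buf *bufs, u32 count)
{
	u32 i;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);

		/* a metadata buffer marks the end of the download set */
		if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
			break;
	}

	return i;
}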
+ */ + bh = (struct ice_buf_hdr *)(bufs + start); + if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) + return ICE_DDP_PKG_SUCCESS; + + for (i = 0; i < count; i++) { + bool last = false; + int status; + + bh = (struct ice_buf_hdr *)(bufs + start + i); + + if (indicate_last) + last = ice_is_last_download_buffer(bh, i, count); + + status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, + &offset, &info, NULL); + + /* Save AQ status from download package */ + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", + status, offset, info); + err = hw->adminq.sq_last_status; + state = ice_map_aq_err_to_ddp_state(err); + break; + } + + if (last) + break; + } + + return state; +} + +/** + * ice_aq_get_pkg_info_list + * @hw: pointer to the hardware structure + * @pkg_info: the buffer which will receive the information list + * @buf_size: the size of the pkg_info information buffer + * @cd: pointer to command details structure or NULL + * + * Get Package Info List (0x0C43) + */ +static int +ice_aq_get_pkg_info_list(struct ice_hw *hw, + struct ice_aqc_get_pkg_info_resp *pkg_info, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + + return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); +} + +/** + * ice_has_signing_seg - determine if package has a signing segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + */ +static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_generic_seg_hdr *seg_hdr; + + seg_hdr = (struct ice_generic_seg_hdr *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr); + + return seg_hdr ? true : false; +} + +/** + * ice_get_pkg_segment_id - get correct package segment id, based on device + * @mac_type: MAC type of the device + */ +static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type) +{ + u32 seg_id; + + switch (mac_type) { + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K: + default: + seg_id = SEGMENT_TYPE_ICE_E810; + break; + } + + return seg_id; +} + +/** + * ice_get_pkg_sign_type - get package segment sign type, based on device + * @mac_type: MAC type of the device + */ +static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type) +{ + u32 sign_type; + + switch (mac_type) { + case ICE_MAC_GENERIC_3K: + sign_type = SEGMENT_SIGN_TYPE_RSA3K; + break; + case ICE_MAC_GENERIC: + default: + sign_type = SEGMENT_SIGN_TYPE_RSA2K; + break; + } + + return sign_type; +} + +/** + * ice_get_signing_req - get correct package requirements, based on device + * @hw: pointer to the hardware structure + */ +static void ice_get_signing_req(struct ice_hw *hw) +{ + hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type); + hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type); +} + +/** + * ice_download_pkg_sig_seg - download a signature segment + * @hw: pointer to the hardware structure + * @seg: pointer to signature segment + */ +static enum ice_ddp_state +ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg) +{ + enum ice_ddp_state state; + + state = ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0, + le32_to_cpu(seg->buf_tbl.buf_count), + false); + + return state; +} + +/** + * ice_download_pkg_config_seg - download a config segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @idx: segment index + * @start: starting buffer + * @count: buffer count + * + * Note: idx must reference a 
ICE segment + */ +static enum ice_ddp_state +ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + u32 idx, u32 start, u32 count) +{ + struct ice_buf_table *bufs; + enum ice_ddp_state state; + struct ice_seg *seg; + u32 buf_count; + + seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (!seg) + return ICE_DDP_PKG_ERR; + + bufs = ice_find_buf_table(seg); + buf_count = le32_to_cpu(bufs->buf_count); + + if (start >= buf_count || start + count > buf_count) + return ICE_DDP_PKG_ERR; + + state = ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count, + true); + + return state; +} + +/** + * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @idx: segment index (must be a signature segment) + * + * Note: idx must reference a signature segment + */ +static enum ice_ddp_state +ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + u32 idx) +{ + enum ice_ddp_state state; + struct ice_sign_seg *seg; + u32 conf_idx; + u32 start; + u32 count; + + seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (!seg) { + state = ICE_DDP_PKG_ERR; + goto exit; + } + + conf_idx = le32_to_cpu(seg->signed_seg_idx); + start = le32_to_cpu(seg->signed_buf_start); + count = le32_to_cpu(seg->signed_buf_count); + + state = ice_download_pkg_sig_seg(hw, seg); + if (state) + goto exit; + + state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start, + count); + +exit: + return state; +} + +/** + * ice_match_signing_seg - determine if a matching signing segment exists + * @pkg_hdr: pointer to package header + * @seg_id: segment id that is expected + * @sign_type: signing type + */ +static bool +ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type) +{ + bool match = false; + u32 i; + + for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { + if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id, + sign_type)) { + match = true; + break; + } + } + + return match; +} + +/** + * ice_post_dwnld_pkg_actions - perform post download package actions + * @hw: pointer to the hardware structure + */ +static enum ice_ddp_state +ice_post_dwnld_pkg_actions(struct ice_hw *hw) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + int status; + + status = ice_set_vlan_mode(hw); + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", + status); + state = ICE_DDP_PKG_ERR; + } + + return state; +} + +/** + * ice_download_pkg_with_sig_seg - download package using signature segments + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + */ +static enum ice_ddp_state +ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + enum ice_aq_err aq_err = hw->adminq.sq_last_status; + enum ice_ddp_state state = ICE_DDP_PKG_ERR; + int status; + u32 i; + + ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id); + ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type); + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == -EALREADY) + state = ICE_DDP_PKG_ALREADY_LOADED; + else + state = ice_map_aq_err_to_ddp_state(aq_err); + return state; + } + + for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { + if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id, + hw->pkg_sign_type)) + continue; + + state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i); + if (state) + break; + } + + if (!state) 
+ state = ice_post_dwnld_pkg_actions(hw); + + ice_release_global_cfg_lock(hw); + + return state; +} + +/** + * ice_dwnld_cfg_bufs + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. + */ +static enum ice_ddp_state +ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + struct ice_buf_hdr *bh; + int status; + + if (!bufs || !count) + return ICE_DDP_PKG_ERR; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. + */ + bh = (struct ice_buf_hdr *)bufs; + if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) + return ICE_DDP_PKG_SUCCESS; + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == -EALREADY) + return ICE_DDP_PKG_ALREADY_LOADED; + return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); + } + + state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true); + if (!state) + state = ice_post_dwnld_pkg_actions(hw); + + ice_release_global_cfg_lock(hw); + + return state; +} + +/** + * ice_download_pkg_without_sig_seg + * @hw: pointer to the hardware structure + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package without signature segment. + */ +static enum ice_ddp_state +ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_buf_table *ice_buf_tbl; + enum ice_ddp_state state; + + ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", + ice_seg->hdr.seg_format_ver.major, + ice_seg->hdr.seg_format_ver.minor, + ice_seg->hdr.seg_format_ver.update, + ice_seg->hdr.seg_format_ver.draft); + + ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", + le32_to_cpu(ice_seg->hdr.seg_type), + le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); + + ice_buf_tbl = ice_find_buf_table(ice_seg); + + ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", + le32_to_cpu(ice_buf_tbl->buf_count)); + + state = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + le32_to_cpu(ice_buf_tbl->buf_count)); + + return state; +} + +/** + * ice_download_pkg + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package. + */ +static enum ice_ddp_state +ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + struct ice_seg *ice_seg) +{ + enum ice_ddp_state state; + + if (hw->pkg_has_signing_seg) + state = ice_download_pkg_with_sig_seg(hw, pkg_hdr); + else + state = ice_download_pkg_without_sig_seg(hw, ice_seg); + + ice_post_pkg_dwnld_vlan_mode_cfg(hw); + + return state; +} + +/** + * ice_init_pkg_info + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + * + * Saves off the package details into the HW structure. 
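/*
 * Illustrative sketch, not part of this patch: both download paths follow the
 * same locking protocol - take the global config lock for write, treat
 * -EALREADY as "another PF already loaded the package", otherwise map the
 * last admin queue error to a DDP state.  The hypothetical helper below only
 * restates that mapping; it is meant to be called when the lock acquisition
 * failed.
 */
static enum ice_ddp_state
ice_ddp_map_lock_err(struct ice_hw *hw, int lock_status)
{
	if (lock_status == -EALREADY)
		return ICE_DDP_PKG_ALREADY_LOADED;

	return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
}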
+ */ +static enum ice_ddp_state +ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_generic_seg_hdr *seg_hdr; + + if (!pkg_hdr) + return ICE_DDP_PKG_ERR; + + hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr); + ice_get_signing_req(hw); + + ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n", + hw->pkg_seg_id); + + seg_hdr = (struct ice_generic_seg_hdr *) + ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr); + if (seg_hdr) { + struct ice_meta_sect *meta; + struct ice_pkg_enum state; + + memset(&state, 0, sizeof(state)); + + /* Get package information from the Metadata Section */ + meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, + ICE_SID_METADATA); + if (!meta) { + ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + hw->pkg_ver = meta->ver; + memcpy(hw->pkg_name, meta->name, sizeof(meta->name)); + + ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", + meta->ver.major, meta->ver.minor, meta->ver.update, + meta->ver.draft, meta->name); + + hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; + memcpy(hw->ice_seg_id, seg_hdr->seg_id, + sizeof(hw->ice_seg_id)); + + ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", + seg_hdr->seg_format_ver.major, + seg_hdr->seg_format_ver.minor, + seg_hdr->seg_format_ver.update, + seg_hdr->seg_format_ver.draft, + seg_hdr->seg_id); + } else { + ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_get_pkg_info + * @hw: pointer to the hardware structure + * + * Store details of the package currently loaded in HW into the HW structure. + */ +enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + struct ice_aqc_get_pkg_info_resp *pkg_info; + u16 size; + u32 i; + + size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); + pkg_info = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL); + if (!pkg_info) + return ICE_DDP_PKG_ERR; + + if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { + state = ICE_DDP_PKG_ERR; + goto init_pkg_free_alloc; + } + + for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { +#define ICE_PKG_FLAG_COUNT 4 + char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; + u8 place = 0; + + if (pkg_info->pkg_info[i].is_active) { + flags[place++] = 'A'; + hw->active_pkg_ver = pkg_info->pkg_info[i].ver; + hw->active_track_id = + le32_to_cpu(pkg_info->pkg_info[i].track_id); + memcpy(hw->active_pkg_name, + pkg_info->pkg_info[i].name, + sizeof(pkg_info->pkg_info[i].name)); + hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; + } + if (pkg_info->pkg_info[i].is_active_at_boot) + flags[place++] = 'B'; + if (pkg_info->pkg_info[i].is_modified) + flags[place++] = 'M'; + if (pkg_info->pkg_info[i].is_in_nvm) + flags[place++] = 'N'; + + ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", + i, pkg_info->pkg_info[i].ver.major, + pkg_info->pkg_info[i].ver.minor, + pkg_info->pkg_info[i].ver.update, + pkg_info->pkg_info[i].ver.draft, + pkg_info->pkg_info[i].name, flags); + } + +init_pkg_free_alloc: + devm_kfree(ice_hw_to_dev(hw), pkg_info); + + return state; +} + +/** + * ice_label_enum_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the label entry to be returned + * @offset: pointer to receive absolute offset, always zero for label sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. 
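/*
 * Illustrative sketch, not part of this patch: the package and segment
 * versions logged above are printed as "major.minor.update.draft" in several
 * places.  The hypothetical formatter below only shows the struct ice_pkg_ver
 * layout those messages rely on.
 */
static void
ice_fmt_pkg_ver(const struct ice_pkg_ver *ver, char *buf, size_t len)
{
	snprintf(buf, len, "%u.%u.%u.%u",
		 ver->major, ver->minor, ver->update, ver->draft);
}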
+ * Handles enumeration of individual label entries. + */ +static void * +ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_label_section *labels; + + if (!section) + return NULL; + + if (index > ICE_MAX_LABELS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + labels = section; + if (index >= le16_to_cpu(labels->count)) + return NULL; + + return labels->label + index; +} + +/** + * ice_enum_labels + * @ice_seg: pointer to the ice segment (NULL on subsequent calls) + * @type: the section type that will contain the label (0 on subsequent calls) + * @state: ice_pkg_enum structure that will hold the state of the enumeration + * @value: pointer to a value that will return the label's value if found + * + * Enumerates a list of labels in the package. The caller will call + * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call + * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL + * the end of the list has been reached. + */ +static char * +ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, + u16 *value) +{ + struct ice_label *label; + + /* Check for valid label section on first call */ + if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) + return NULL; + + label = ice_pkg_enum_entry(ice_seg, state, type, NULL, + ice_label_enum_handler); + if (!label) + return NULL; + + *value = le16_to_cpu(label->value); + return label->name; +} + +/** + * ice_find_label_value + * @ice_seg: pointer to the ice segment (non-NULL) + * @name: name of the label to search for + * @type: the section type that will contain the label + * @value: pointer to a value that will return the label's value if found + * + * Finds a label's value given the label name and the section type to search. + * The ice_seg parameter must not be NULL since the first call to + * ice_enum_labels requires a pointer to an actual ice_seg structure. + */ +int +ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, + u16 *value) +{ + struct ice_pkg_enum state; + char *label_name; + u16 val; + + memset(&state, 0, sizeof(state)); + + if (!ice_seg) + return -EINVAL; + + do { + label_name = ice_enum_labels(ice_seg, type, &state, &val); + if (label_name && !strcmp(label_name, name)) { + *value = val; + return 0; + } + + ice_seg = NULL; + } while (label_name); + + return -EIO; +} + +/** + * ice_verify_pkg - verify package + * @pkg: pointer to the package buffer + * @len: size of the package buffer + * + * Verifies various attributes of the package file, including length, format + * version, and the requirement of at least one segment. 
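/*
 * Illustrative usage sketch, not part of this patch: ice_find_label_value()
 * resolves a named label to its numeric value by walking the label sections
 * of the loaded segment.  "EXAMPLE_LABEL" and the use of ICE_SID_LBL_FIRST
 * below are placeholders; real callers pass one of the ICE_SID_LBL_* section
 * IDs and a label name defined by the package.
 */
static int ice_lookup_example_label(struct ice_hw *hw, u16 *value)
{
	if (!hw->seg)
		return -EINVAL;

	return ice_find_label_value(hw->seg, "EXAMPLE_LABEL",
				    ICE_SID_LBL_FIRST, value);
}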
+ */ +enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +{ + u32 seg_count; + u32 i; + + if (len < struct_size(pkg, seg_offset, 1)) + return ICE_DDP_PKG_INVALID_FILE; + + if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || + pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || + pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || + pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) + return ICE_DDP_PKG_INVALID_FILE; + + /* pkg must have at least one segment */ + seg_count = le32_to_cpu(pkg->seg_count); + if (seg_count < 1) + return ICE_DDP_PKG_INVALID_FILE; + + /* make sure segment array fits in package length */ + if (len < struct_size(pkg, seg_offset, seg_count)) + return ICE_DDP_PKG_INVALID_FILE; + + /* all segments must fit within length */ + for (i = 0; i < seg_count; i++) { + u32 off = le32_to_cpu(pkg->seg_offset[i]); + struct ice_generic_seg_hdr *seg; + + /* segment header must fit */ + if (len < off + sizeof(*seg)) + return ICE_DDP_PKG_INVALID_FILE; + + seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); + + /* segment body must fit */ + if (len < off + le32_to_cpu(seg->seg_size)) + return ICE_DDP_PKG_INVALID_FILE; + } + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_free_seg - free package segment pointer + * @hw: pointer to the hardware structure + * + * Frees the package segment pointer in the proper manner, depending on if the + * segment was allocated or just the passed in pointer was stored. + */ +void ice_free_seg(struct ice_hw *hw) +{ + if (hw->pkg_copy) { + devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); + hw->pkg_copy = NULL; + hw->pkg_size = 0; + } + hw->seg = NULL; +} + +/** + * ice_chk_pkg_version - check package version for compatibility with driver + * @pkg_ver: pointer to a version structure to check + * + * Check to make sure that the package about to be downloaded is compatible with + * the driver. To be compatible, the major and minor components of the package + * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR + * definitions. 
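/*
 * Illustrative sketch, not part of this patch: ice_verify_pkg() above repeats
 * the same containment test - "does an object at offset 'off' with size
 * 'size' fit inside a buffer of length 'len'".  The hypothetical helper below
 * writes that test once, with an explicit overflow guard.
 */
static bool ice_range_fits(u32 len, u32 off, u32 size)
{
	return off <= len && size <= len - off;
}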
+ */ +static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +{ + if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; + else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_chk_pkg_compat + * @hw: pointer to the hardware structure + * @ospkg: pointer to the package hdr + * @seg: pointer to the package segment hdr + * + * This function checks the package version compatibility with driver and NVM + */ +static enum ice_ddp_state +ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, + struct ice_seg **seg) +{ + struct ice_aqc_get_pkg_info_resp *pkg; + enum ice_ddp_state state; + u16 size; + u32 i; + + /* Check package version compatibility */ + state = ice_chk_pkg_version(&hw->pkg_ver); + if (state) { + ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); + return state; + } + + /* find ICE segment in given package */ + *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id, + ospkg); + if (!*seg) { + ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + /* Check if FW is compatible with the OS package */ + size = struct_size(pkg, pkg_info, ICE_PKG_CNT); + pkg = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL); + if (!pkg) + return ICE_DDP_PKG_ERR; + + if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { + state = ICE_DDP_PKG_ERR; + goto fw_ddp_compat_free_alloc; + } + + for (i = 0; i < le32_to_cpu(pkg->count); i++) { + /* loop till we find the NVM package */ + if (!pkg->pkg_info[i].is_in_nvm) + continue; + if ((*seg)->hdr.seg_format_ver.major != + pkg->pkg_info[i].ver.major || + (*seg)->hdr.seg_format_ver.minor > + pkg->pkg_info[i].ver.minor) { + state = ICE_DDP_PKG_FW_MISMATCH; + ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); + } + /* done processing NVM package so break */ + break; + } +fw_ddp_compat_free_alloc: + devm_kfree(ice_hw_to_dev(hw), pkg); + return state; +} + +/** + * ice_sw_fv_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the field vector entry to be returned + * @offset: ptr to variable that receives the offset in the field vector table + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * This function treats the given section as of type ice_sw_fv_section and + * enumerates offset field. "offset" is an index into the field vector table. + */ +static void * +ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) +{ + struct ice_sw_fv_section *fv_section = section; + + if (!section || sect_type != ICE_SID_FLD_VEC_SW) + return NULL; + if (index >= le16_to_cpu(fv_section->count)) + return NULL; + if (offset) + /* "index" passed in to this function is relative to a given + * 4k block. 
To get to the true index into the field vector + * table need to add the relative index to the base_offset + * field of this section + */ + *offset = le16_to_cpu(fv_section->base_offset) + index; + return fv_section->fv + index; +} + +/** + * ice_get_prof_index_max - get the max profile index for used profile + * @hw: pointer to the HW struct + * + * Calling this function will get the max profile index for used profile + * and store the index number in struct ice_switch_info *switch_info + * in hw for following use. + */ +static int ice_get_prof_index_max(struct ice_hw *hw) +{ + u16 prof_index = 0, j, max_prof_index = 0; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + bool flag = false; + struct ice_fv *fv; + u32 offset; + + memset(&state, 0, sizeof(state)); + + if (!hw->seg) + return -EINVAL; + + ice_seg = hw->seg; + + do { + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* in the profile that not be used, the prot_id is set to 0xff + * and the off is set to 0x1ff for all the field vectors. + */ + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id != ICE_PROT_INVALID || + fv->ew[j].off != ICE_FV_OFFSET_INVAL) + flag = true; + if (flag && prof_index > max_prof_index) + max_prof_index = prof_index; + + prof_index++; + flag = false; + } while (fv); + + hw->switch_info->max_used_prof_index = max_prof_index; + + return 0; +} + +/** + * ice_get_ddp_pkg_state - get DDP pkg state after download + * @hw: pointer to the HW struct + * @already_loaded: indicates if pkg was already loaded onto the device + * + */ +static enum ice_ddp_state +ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded) +{ + if (hw->pkg_ver.major == hw->active_pkg_ver.major && + hw->pkg_ver.minor == hw->active_pkg_ver.minor && + hw->pkg_ver.update == hw->active_pkg_ver.update && + hw->pkg_ver.draft == hw->active_pkg_ver.draft && + !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { + if (already_loaded) + return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED; + else + return ICE_DDP_PKG_SUCCESS; + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED; + } else { + return ICE_DDP_PKG_ERR; + } +} + +/** + * ice_init_pkg_regs - initialize additional package registers + * @hw: pointer to the hardware structure + */ +static void ice_init_pkg_regs(struct ice_hw *hw) +{ +#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF +#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF +#define ICE_SW_BLK_IDX 0 + + /* setup Switch block input mask, which is 48-bits in two parts */ + wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); + wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); +} + +/** + * ice_hw_ptype_ena - check if the PTYPE is enabled or not + * @hw: pointer to the HW structure + * @ptype: the hardware PTYPE + */ +bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype) +{ + return ptype < ICE_FLOW_PTYPE_MAX && + test_bit(ptype, hw->hw_ptype); +} + +/** + * ice_marker_ptype_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the Marker PType TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections + * + * This is a 
callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual Marker PType TCAM entries. + */ +static void * +ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_marker_ptype_tcam_section *marker_ptype; + + if (!section) + return NULL; + + if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE) + return NULL; + + if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + marker_ptype = section; + if (index >= le16_to_cpu(marker_ptype->count)) + return NULL; + + return marker_ptype->tcam + index; +} + +/** + * ice_fill_hw_ptype - fill the enabled PTYPE bit information + * @hw: pointer to the HW structure + */ +static void +ice_fill_hw_ptype(struct ice_hw *hw) +{ + struct ice_marker_ptype_tcam_entry *tcam; + struct ice_seg *seg = hw->seg; + struct ice_pkg_enum state; + + bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); + if (!seg) + return; + + memset(&state, 0, sizeof(state)); + + do { + tcam = ice_pkg_enum_entry(seg, &state, + ICE_SID_RXPARSER_MARKER_PTYPE, NULL, + ice_marker_ptype_tcam_handler); + if (tcam && + le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && + le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) + set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); + + seg = NULL; + } while (tcam); +} + +/** + * ice_init_pkg - initialize/download package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function initializes a package. The package contains HW tables + * required to do packet processing. First, the function extracts package + * information such as version. Then it finds the ice configuration segment + * within the package; this function then saves a copy of the segment pointer + * within the supplied package buffer. Next, the function will cache any hints + * from the package, followed by downloading the package itself. Note, that if + * a previous PF driver has already downloaded the package successfully, then + * the current driver will not have to download the package again. + * + * The local package contents will be used to query default behavior and to + * update specific sections of the HW's version of the package (e.g. to update + * the parse graph to understand new protocols). + * + * This function stores a pointer to the package buffer memory, and it is + * expected that the supplied buffer will not be freed immediately. If the + * package buffer needs to be freed, such as when read from a file, use + * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this + * case. 
+ */ +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +{ + bool already_loaded = false; + enum ice_ddp_state state; + struct ice_pkg_hdr *pkg; + struct ice_seg *seg; + + if (!buf || !len) + return ICE_DDP_PKG_ERR; + + pkg = (struct ice_pkg_hdr *)buf; + state = ice_verify_pkg(pkg, len); + if (state) { + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", + state); + return state; + } + + /* initialize package info */ + state = ice_init_pkg_info(hw, pkg); + if (state) + return state; + + /* For packages with signing segments, must be a matching segment */ + if (hw->pkg_has_signing_seg) + if (!ice_match_signing_seg(pkg, hw->pkg_seg_id, + hw->pkg_sign_type)) + return ICE_DDP_PKG_ERR; + + /* before downloading the package, check package version for + * compatibility with driver + */ + state = ice_chk_pkg_compat(hw, pkg, &seg); + if (state) + return state; + + /* initialize package hints and then download package */ + ice_init_pkg_hints(hw, seg); + state = ice_download_pkg(hw, pkg, seg); + + if (state == ICE_DDP_PKG_ALREADY_LOADED) { + ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); + already_loaded = true; + } + + /* Get information on the package currently loaded in HW, then make sure + * the driver is compatible with this version. + */ + if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) { + state = ice_get_pkg_info(hw); + if (!state) + state = ice_get_ddp_pkg_state(hw, already_loaded); + } + + if (ice_is_init_pkg_successful(state)) { + hw->seg = seg; + /* on successful package download update other required + * registers to support the package and fill HW tables + * with package content. + */ + ice_init_pkg_regs(hw); + ice_fill_blk_tbls(hw); + ice_fill_hw_ptype(hw); + ice_get_prof_index_max(hw); + } else { + ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", + state); + } + + return state; +} + +/** + * ice_copy_and_init_pkg - initialize/download a copy of the package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function copies the package buffer, and then calls ice_init_pkg() to + * initialize the copied package contents. + * + * The copying is necessary if the package buffer supplied is constant, or if + * the memory may disappear shortly after calling this function. + * + * If the package buffer resides in the data segment and can be modified, the + * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). + * + * However, if the package buffer needs to be copied first, such as when being + * read from a file, the caller should use ice_copy_and_init_pkg(). + * + * This function will first copy the package buffer, before calling + * ice_init_pkg(). The caller is free to immediately destroy the original + * package buffer, as the new copy will be managed by this function and + * related routines. 
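/*
 * Illustrative usage sketch, not part of this patch: a typical caller obtains
 * the DDP image with request_firmware() (requires <linux/firmware.h>) and
 * hands the constant buffer to ice_copy_and_init_pkg(), which copies it
 * before initialization.  The file name below is a placeholder and error
 * handling is reduced to the essentials.
 */
static int ice_load_ddp_example(struct ice_hw *hw, struct device *dev)
{
	const struct firmware *fw;
	enum ice_ddp_state state;
	int err;

	err = request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev);
	if (err)
		return err;

	state = ice_copy_and_init_pkg(hw, fw->data, fw->size);
	release_firmware(fw);	/* safe: the package buffer was copied */

	return ice_is_init_pkg_successful(state) ? 0 : -EIO;
}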
+ */ +enum ice_ddp_state +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) +{ + enum ice_ddp_state state; + u8 *buf_copy; + + if (!buf || !len) + return ICE_DDP_PKG_ERR; + + buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); + + state = ice_init_pkg(hw, buf_copy, len); + if (!ice_is_init_pkg_successful(state)) { + /* Free the copy, since we failed to initialize the package */ + devm_kfree(ice_hw_to_dev(hw), buf_copy); + } else { + /* Track the copied pkg so we can free it later */ + hw->pkg_copy = buf_copy; + hw->pkg_size = len; + } + + return state; +} + +/** + * ice_is_init_pkg_successful - check if DDP init was successful + * @state: state of the DDP pkg after download + */ +bool ice_is_init_pkg_successful(enum ice_ddp_state state) +{ + switch (state) { + case ICE_DDP_PKG_SUCCESS: + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + return true; + default: + return false; + } +} + +/** + * ice_pkg_buf_alloc + * @hw: pointer to the HW structure + * + * Allocates a package buffer and returns a pointer to the buffer header. + * Note: all package contents must be in Little Endian form. + */ +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) +{ + struct ice_buf_build *bld; + struct ice_buf_hdr *buf; + + bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL); + if (!bld) + return NULL; + + buf = (struct ice_buf_hdr *)bld; + buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr, + section_entry)); + return bld; +} + +static bool ice_is_gtp_u_profile(u16 prof_idx) +{ + return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID && + prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP) || + prof_idx == ICE_PROFID_IPV4_GTPU_TEID; +} + +static bool ice_is_gtp_c_profile(u16 prof_idx) +{ + switch (prof_idx) { + case ICE_PROFID_IPV4_GTPC_TEID: + case ICE_PROFID_IPV4_GTPC_NO_TEID: + case ICE_PROFID_IPV6_GTPC_TEID: + case ICE_PROFID_IPV6_GTPC_NO_TEID: + return true; + default: + return false; + } +} + +/** + * ice_get_sw_prof_type - determine switch profile type + * @hw: pointer to the HW structure + * @fv: pointer to the switch field vector + * @prof_idx: profile index to check + */ +static enum ice_prof_type +ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx) +{ + bool valid_prof = false; + u16 i; + + if (ice_is_gtp_c_profile(prof_idx)) + return ICE_PROF_TUN_GTPC; + + if (ice_is_gtp_u_profile(prof_idx)) + return ICE_PROF_TUN_GTPU; + + for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { + if (fv->ew[i].off != ICE_NAN_OFFSET) + valid_prof = true; + + /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && + fv->ew[i].off == ICE_VNI_OFFSET) + return ICE_PROF_TUN_UDP; + + /* GRE tunnel will have GRE protocol */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) + return ICE_PROF_TUN_GRE; + } + + return valid_prof ? 
ICE_PROF_NON_TUN : ICE_PROF_INVALID; +} + +/** + * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type + * @hw: pointer to hardware structure + * @req_profs: type of profiles requested + * @bm: pointer to memory for returning the bitmap of field vectors + */ +void +ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, + unsigned long *bm) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + memset(&state, 0, sizeof(state)); + bitmap_zero(bm, ICE_MAX_NUM_PROFILES); + ice_seg = hw->seg; + do { + enum ice_prof_type prof_type; + u32 offset; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + ice_seg = NULL; + + if (fv) { + /* Determine field vector type */ + prof_type = ice_get_sw_prof_type(hw, fv, offset); + + if (req_profs & prof_type) + set_bit((u16)offset, bm); + } + } while (fv); +} + +/** + * ice_get_sw_fv_list + * @hw: pointer to the HW structure + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @bm: bitmap of field vectors to consider + * @fv_list: Head of a list + * + * Finds all the field vector entries from switch block that contain + * a given protocol ID and offset and returns a list of structures of type + * "ice_sw_fv_list_entry". Every structure in the list has a field vector + * definition and profile ID information + * NOTE: The caller of the function is responsible for freeing the memory + * allocated for every list entry. + */ +int +ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, + unsigned long *bm, struct list_head *fv_list) +{ + struct ice_sw_fv_list_entry *fvl; + struct ice_sw_fv_list_entry *tmp; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + u32 offset; + + memset(&state, 0, sizeof(state)); + + if (!lkups->n_val_words || !hw->seg) + return -EINVAL; + + ice_seg = hw->seg; + do { + u16 i; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* If field vector is not in the bitmap list, then skip this + * profile. 
+ */ + if (!test_bit((u16)offset, bm)) + continue; + + for (i = 0; i < lkups->n_val_words; i++) { + int j; + + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id == + lkups->fv_words[i].prot_id && + fv->ew[j].off == lkups->fv_words[i].off) + break; + if (j >= hw->blk[ICE_BLK_SW].es.fvw) + break; + if (i + 1 == lkups->n_val_words) { + fvl = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*fvl), GFP_KERNEL); + if (!fvl) + goto err; + fvl->fv_ptr = fv; + fvl->profile_id = offset; + list_add(&fvl->list_entry, fv_list); + break; + } + } + } while (fv); + if (list_empty(fv_list)) { + dev_warn(ice_hw_to_dev(hw), + "Required profiles not found in currently loaded DDP package"); + return -EIO; + } + return 0; + +err: + list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) { + list_del(&fvl->list_entry); + devm_kfree(ice_hw_to_dev(hw), fvl); + } + + return -ENOMEM; +} + +/** + * ice_init_prof_result_bm - Initialize the profile result index bitmap + * @hw: pointer to hardware structure + */ +void ice_init_prof_result_bm(struct ice_hw *hw) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + memset(&state, 0, sizeof(state)); + + if (!hw->seg) + return; + + ice_seg = hw->seg; + do { + u32 off; + u16 i; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &off, ice_sw_fv_handler); + ice_seg = NULL; + if (!fv) + break; + + bitmap_zero(hw->switch_info->prof_res_bm[off], + ICE_MAX_FV_WORDS); + + /* Determine empty field vector indices, these can be + * used for recipe results. Skip index 0, since it is + * always used for Switch ID. + */ + for (i = 1; i < ICE_MAX_FV_WORDS; i++) + if (fv->ew[i].prot_id == ICE_PROT_INVALID && + fv->ew[i].off == ICE_FV_OFFSET_INVAL) + set_bit(i, hw->switch_info->prof_res_bm[off]); + } while (fv); +} + +/** + * ice_pkg_buf_free + * @hw: pointer to the HW structure + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Frees a package buffer + */ +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) +{ + devm_kfree(ice_hw_to_dev(hw), bld); +} + +/** + * ice_pkg_buf_reserve_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @count: the number of sections to reserve + * + * Reserves one or more section table entries in a package buffer. This routine + * can be called multiple times as long as they are made before calling + * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() + * is called once, the number of sections that can be allocated will not be able + * to be increased; not using all reserved sections is fine, but this will + * result in some wasted space in the buffer. + * Note: all package contents must be in Little Endian form. 
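/*
 * Illustrative usage sketch, not part of this patch: ice_get_sw_fv_bitmap()
 * and ice_get_sw_fv_list() are used together - first build a bitmap of field
 * vectors for the requested profile types, then collect the entries that also
 * match the lookup words.  "lkups" and "fv_list" are assumed to be prepared
 * by the caller, who also frees the returned list entries.
 */
static int
ice_collect_tun_fvs_example(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
			    struct list_head *fv_list)
{
	DECLARE_BITMAP(bm, ICE_MAX_NUM_PROFILES);

	ice_get_sw_fv_bitmap(hw, ICE_PROF_TUN_UDP, bm);

	return ice_get_sw_fv_list(hw, lkups, bm, fv_list);
}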
+ */ +int +ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) +{ + struct ice_buf_hdr *buf; + u16 section_count; + u16 data_end; + + if (!bld) + return -EINVAL; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* already an active section, can't increase table size */ + section_count = le16_to_cpu(buf->section_count); + if (section_count > 0) + return -EIO; + + if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) + return -EIO; + bld->reserved_section_table_entries += count; + + data_end = le16_to_cpu(buf->data_end) + + flex_array_size(buf, section_entry, count); + buf->data_end = cpu_to_le16(data_end); + + return 0; +} + +/** + * ice_pkg_buf_alloc_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * + * Reserves memory in the buffer for a section's content and updates the + * buffers' status accordingly. This routine returns a pointer to the first + * byte of the section start within the buffer, which is used to fill in the + * section contents. + * Note: all package contents must be in Little Endian form. + */ +void * +ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) +{ + struct ice_buf_hdr *buf; + u16 sect_count; + u16 data_end; + + if (!bld || !type || !size) + return NULL; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* check for enough space left in buffer */ + data_end = le16_to_cpu(buf->data_end); + + /* section start must align on 4 byte boundary */ + data_end = ALIGN(data_end, 4); + + if ((data_end + size) > ICE_MAX_S_DATA_END) + return NULL; + + /* check for more available section table entries */ + sect_count = le16_to_cpu(buf->section_count); + if (sect_count < bld->reserved_section_table_entries) { + void *section_ptr = ((u8 *)buf) + data_end; + + buf->section_entry[sect_count].offset = cpu_to_le16(data_end); + buf->section_entry[sect_count].size = cpu_to_le16(size); + buf->section_entry[sect_count].type = cpu_to_le32(type); + + data_end += size; + buf->data_end = cpu_to_le16(data_end); + + buf->section_count = cpu_to_le16(sect_count + 1); + return section_ptr; + } + + /* no free section table entries */ + return NULL; +} + +/** + * ice_pkg_buf_alloc_single_section + * @hw: pointer to the HW structure + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * @section: returns pointer to the section + * + * Allocates a package buffer with a single section. + * Note: all package contents must be in Little Endian form. + */ +struct ice_buf_build * +ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, + void **section) +{ + struct ice_buf_build *buf; + + if (!section) + return NULL; + + buf = ice_pkg_buf_alloc(hw); + if (!buf) + return NULL; + + if (ice_pkg_buf_reserve_section(buf, 1)) + goto ice_pkg_buf_alloc_single_section_err; + + *section = ice_pkg_buf_alloc_section(buf, type, size); + if (!*section) + goto ice_pkg_buf_alloc_single_section_err; + + return buf; + +ice_pkg_buf_alloc_single_section_err: + ice_pkg_buf_free(hw, buf); + return NULL; +} + +/** + * ice_pkg_buf_unreserve_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @count: the number of sections to unreserve + * + * Unreserves one or more section table entries in a package buffer, releasing + * space that can be used for section data. This routine can be called + * multiple times as long as they are made before calling + * ice_pkg_buf_alloc_section(). 
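/*
 * Illustrative usage sketch, not part of this patch: a minimal update flow
 * built on the helpers above - allocate a one-section buffer, fill the
 * section, push it with ice_update_pkg(), then free the build.  The section
 * type, size, and helper name are placeholders.
 */
static int
ice_push_one_section_example(struct ice_hw *hw, u32 sect_type, u16 sect_size,
			     const void *data)
{
	struct ice_buf_build *bld;
	void *sect;
	int status;

	bld = ice_pkg_buf_alloc_single_section(hw, sect_type, sect_size, &sect);
	if (!bld)
		return -ENOMEM;

	/* section contents must already be in Little Endian form */
	memcpy(sect, data, sect_size);

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);

	ice_pkg_buf_free(hw, bld);
	return status;
}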
Once ice_pkg_buf_alloc_section() + * is called once, the number of sections that can be allocated will not be able + * to be increased; not using all reserved sections is fine, but this will + * result in some wasted space in the buffer. + * Note: all package contents must be in Little Endian form. + */ +int +ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count) +{ + struct ice_buf_hdr *buf; + u16 section_count; + u16 data_end; + + if (!bld) + return -EINVAL; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* already an active section, can't decrease table size */ + section_count = le16_to_cpu(buf->section_count); + if (section_count > 0) + return -EIO; + + if (count > bld->reserved_section_table_entries) + return -EIO; + bld->reserved_section_table_entries -= count; + + data_end = le16_to_cpu(buf->data_end) - + flex_array_size(buf, section_entry, count); + buf->data_end = cpu_to_le16(data_end); + + return 0; +} + +/** + * ice_pkg_buf_get_free_space + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Returns the number of free bytes remaining in the buffer. + * Note: all package contents must be in Little Endian form. + */ +u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld) +{ + struct ice_buf_hdr *buf; + + if (!bld) + return 0; + + buf = (struct ice_buf_hdr *)&bld->buf; + return ICE_MAX_S_DATA_END - le16_to_cpu(buf->data_end); +} + +/** + * ice_pkg_buf_get_active_sections + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Returns the number of active sections. Before using the package buffer + * in an update package command, the caller should make sure that there is at + * least one active section - otherwise, the buffer is not legal and should + * not be used. + * Note: all package contents must be in Little Endian form. + */ +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) +{ + struct ice_buf_hdr *buf; + + if (!bld) + return 0; + + buf = (struct ice_buf_hdr *)&bld->buf; + return le16_to_cpu(buf->section_count); +} + +/** + * ice_pkg_buf + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Return a pointer to the buffer's header + */ +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) +{ + if (bld) + return &bld->buf; + + return NULL; +} + +/** + * ice_find_buf_table + * @ice_seg: pointer to the ice segment + * + * Returns the address of the buffer table within the ice segment. + */ +struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) +{ + struct ice_nvm_table *nvms; + + nvms = (struct ice_nvm_table *) + (ice_seg->device_table + + le32_to_cpu(ice_seg->device_table_count)); + + return (__force struct ice_buf_table *) + (nvms->vers + le32_to_cpu(nvms->table_count)); +} + +/** + * ice_pkg_val_buf + * @buf: pointer to the ice buffer + * + * This helper function validates a buffer's header. + */ +static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +{ + struct ice_buf_hdr *hdr; + u16 section_count; + u16 data_end; + + hdr = (struct ice_buf_hdr *)buf->buf; + /* verify data */ + section_count = le16_to_cpu(hdr->section_count); + if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) + return NULL; + + data_end = le16_to_cpu(hdr->data_end); + if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) + return NULL; + + return hdr; +} + +/** + * ice_pkg_enum_buf + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This function will enumerate all the buffers in the ice segment. 
The first + * call is made with the ice_seg parameter non-NULL; on subsequent calls, + * ice_seg is set to NULL which continues the enumeration. When the function + * returns a NULL pointer, then the end of the buffers has been reached, or an + * unexpected value has been detected (for example an invalid section count or + * an invalid buffer end value). + */ +struct ice_buf_hdr * +ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (ice_seg) { + state->buf_table = ice_find_buf_table(ice_seg); + if (!state->buf_table) + return NULL; + + state->buf_idx = 0; + return ice_pkg_val_buf(state->buf_table->buf_array); + } + + if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) + return ice_pkg_val_buf(state->buf_table->buf_array + + state->buf_idx); + else + return NULL; +} + +/** + * ice_pkg_advance_sect + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This helper function will advance the section within the ice segment, + * also advancing the buffer if needed. + */ +bool +ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (!ice_seg && !state->buf) + return false; + + if (!ice_seg && state->buf) + if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) + return true; + + state->buf = ice_pkg_enum_buf(ice_seg, state); + if (!state->buf) + return false; + + /* start of new buffer, reset section index */ + state->sect_idx = 0; + return true; +} + +/** + * ice_pkg_enum_section + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * + * This function will enumerate all the sections of a particular type in the + * ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the matching + * sections has been reached. 
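+ *
+ * A typical enumeration loop looks like this (sketch; sect_type is whatever
+ * section ID the caller cares about):
+ *
+ *	sect = ice_pkg_enum_section(ice_seg, &state, sect_type);
+ *	while (sect) {
+ *		... process the section ...
+ *		sect = ice_pkg_enum_section(NULL, &state, sect_type);
+ *	}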
+ */
+void *
+ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+		     u32 sect_type)
+{
+	u16 offset, size;
+
+	if (ice_seg)
+		state->type = sect_type;
+
+	if (!ice_pkg_advance_sect(ice_seg, state))
+		return NULL;
+
+	/* scan for next matching section */
+	while (state->buf->section_entry[state->sect_idx].type !=
+	       cpu_to_le32(state->type))
+		if (!ice_pkg_advance_sect(NULL, state))
+			return NULL;
+
+	/* validate section */
+	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
+	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
+		return NULL;
+
+	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
+	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
+		return NULL;
+
+	/* make sure the section fits in the buffer */
+	if (offset + size > ICE_PKG_BUF_SIZE)
+		return NULL;
+
+	state->sect_type =
+		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
+
+	/* calc pointer to this section */
+	state->sect = ((u8 *)state->buf) +
+		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
+
+	return state->sect;
+}
+
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries of the section type
+ *
+ * This function will enumerate all the entries of a particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolute
+ * offset for each entry. For example, if the base for a section's header
+ * indicates a base offset of 10, and the index for the entry is 2, then the
+ * section handler function should set the offset to 10 + 2 = 12.
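+ *
+ * A handler for such a section could look roughly like this (sketch only;
+ * struct my_sect with its base_offset, count and entry members is a
+ * hypothetical section layout, not a real driver structure):
+ *
+ *	static void *my_handler(u32 sect_type, void *section, u32 index,
+ *				u32 *offset)
+ *	{
+ *		struct my_sect *s = section;
+ *
+ *		if (!s || index >= le16_to_cpu(s->count))
+ *			return NULL;
+ *		if (offset)
+ *			*offset = le16_to_cpu(s->base_offset) + index;
+ *		return s->entry + index;
+ *	}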
+ */ +void * +ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type, u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)) +{ + void *entry; + + if (ice_seg) { + if (!handler) + return NULL; + + if (!ice_pkg_enum_section(ice_seg, state, sect_type)) + return NULL; + + state->entry_idx = 0; + state->handler = handler; + } else { + state->entry_idx++; + } + + if (!state->handler) + return NULL; + + /* get entry */ + entry = state->handler(state->sect_type, state->sect, state->entry_idx, + offset); + if (!entry) { + /* end of a section, look for another section of this type */ + if (!ice_pkg_enum_section(NULL, state, 0)) + return NULL; + + state->entry_idx = 0; + entry = state->handler(state->sect_type, state->sect, + state->entry_idx, offset); + } + + return entry; +} + +/** + * ice_boost_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the boost TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual boost TCAM entries. + */ +static void * +ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) +{ + struct ice_boost_tcam_section *boost; + + if (!section) + return NULL; + + if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) + return NULL; + + if (index > ICE_MAX_BST_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + boost = section; + if (index >= le16_to_cpu(boost->count)) + return NULL; + + return boost->tcam + index; +} + +/** + * ice_find_boost_entry + * @ice_seg: pointer to the ice segment (non-NULL) + * @addr: Boost TCAM address of entry to search for + * @entry: returns pointer to the entry + * + * Finds a particular Boost TCAM entry and returns a pointer to that entry + * if it is found. The ice_seg parameter must not be NULL since the first call + * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. + */ +static int +ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, + struct ice_boost_tcam_entry **entry) +{ + struct ice_boost_tcam_entry *tcam; + struct ice_pkg_enum state; + + memset(&state, 0, sizeof(state)); + + if (!ice_seg) + return -EINVAL; + + do { + tcam = ice_pkg_enum_entry(ice_seg, &state, + ICE_SID_RXPARSER_BOOST_TCAM, NULL, + ice_boost_tcam_handler); + if (tcam && le16_to_cpu(tcam->addr) == addr) { + *entry = tcam; + return 0; + } + + ice_seg = NULL; + } while (tcam); + + *entry = NULL; + return -EIO; +} + +/** + * ice_init_pkg_hints + * @hw: pointer to the HW structure + * @ice_seg: pointer to the segment of the package scan (non-NULL) + * + * This function will scan the package and save off relevant information + * (hints or metadata) for driver use. The ice_seg parameter must not be NULL + * since the first call to ice_enum_labels requires a pointer to an actual + * ice_seg structure. 
+ */
+void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+	struct ice_pkg_enum state;
+	char *label_name;
+	u16 val;
+	int i;
+
+	memset(&hw->tnl, 0, sizeof(hw->tnl));
+	memset(&state, 0, sizeof(state));
+
+	if (!ice_seg)
+		return;
+
+	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+				     &val);
+
+	while (label_name) {
+/* TODO: Replace !strncmp() with wrappers like match_some_pre() */
+		if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
+			/* check for a tunnel entry */
+			ice_add_tunnel_hint(hw, label_name, val);
+
+		/* check for a dvm mode entry */
+		else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
+			ice_add_dvm_hint(hw, val, true);
+
+		/* check for a svm mode entry */
+		else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
+			ice_add_dvm_hint(hw, val, false);
+
+		label_name = ice_enum_labels(NULL, 0, &state, &val);
+	}
+
+	/* Cache the appropriate boost TCAM entry pointers for tunnels */
+	for (i = 0; i < hw->tnl.count; i++) {
+		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+				     &hw->tnl.tbl[i].boost_entry);
+		if (hw->tnl.tbl[i].boost_entry)
+			hw->tnl.tbl[i].valid = true;
+	}
+
+	/* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
+	for (i = 0; i < hw->dvm_upd.count; i++)
+		ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
+				     &hw->dvm_upd.tbl[i].boost_entry);
+}
+
+/**
+ * ice_acquire_global_cfg_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the global config lock for reading
+ * or writing of the package. When attempting to obtain write access, the
+ * caller must check for the following two return values:
+ *
+ * 0 - Means the caller has acquired the global config lock
+ * and can perform writing of the package.
+ * -EALREADY - Indicates another driver has already written the
+ * package or has found that no update was necessary; in
+ * this case, the caller can just skip performing any
+ * update of the package.
+ */
+int
+ice_acquire_global_cfg_lock(struct ice_hw *hw,
+			    enum ice_aq_res_access_type access)
+{
+	int status;
+
+	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
+				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+
+	if (!status)
+		mutex_lock(&ice_global_cfg_lock_sw);
+	else if (status == -EALREADY)
+		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
+
+	return status;
+}
+
+/**
+ * ice_release_global_cfg_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the global config lock.
+ */
+void ice_release_global_cfg_lock(struct ice_hw *hw)
+{
+	mutex_unlock(&ice_global_cfg_lock_sw);
+	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+}
+
+/**
+ * ice_acquire_change_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the change lock.
+ */
+int
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
+{
+	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
+			       ICE_CHANGE_LOCK_TIMEOUT);
+}
+
+/**
+ * ice_release_change_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the change lock using the proper Admin Command.
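+ *
+ * Expected pairing with ice_acquire_change_lock() (sketch; it assumes
+ * ice_update_pkg_no_lock() is the update variant that does not take the
+ * lock itself):
+ *
+ *	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ *	if (status)
+ *		return status;
+ *	status = ice_update_pkg_no_lock(hw, bufs, count);
+ *	ice_release_change_lock(hw);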
+ */
+void ice_release_change_lock(struct ice_hw *hw)
+{
+	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
+}
+
+/**
+ * ice_get_set_tx_topo - get or set Tx topology
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @buf_size: buffer size
+ * @cd: pointer to command details structure or NULL
+ * @flags: pointer to descriptor flags
+ * @set: 0-get, 1-set topology
+ *
+ * The function will get or set Tx topology
+ */
+static int
+ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
+		    struct ice_sq_cd *cd, u8 *flags, bool set)
+{
+	struct ice_aqc_get_set_tx_topo *cmd;
+	struct ice_aq_desc desc;
+	int status;
+
+	cmd = &desc.params.get_set_tx_topo;
+	if (set) {
+		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
+		cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
+		/* requested to update a new topology, not a default topology */
+		if (buf)
+			cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
+					  ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
+	} else {
+		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
+		cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
+	}
+	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+	if (status)
+		return status;
+	/* read the return flag values (first byte) for get operation */
+	if (!set && flags)
+		*flags = desc.params.get_set_tx_topo.set_flags;
+
+	return 0;
+}
+
+/**
+ * ice_cfg_tx_topo - Initialize new Tx topology if available
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @len: buffer size
+ *
+ * The function will apply the new Tx topology from the package buffer
+ * if available.
+ */
+int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
+{
+	u8 *current_topo, *new_topo = NULL;
+	struct ice_run_time_cfg_seg *seg;
+	struct ice_buf_hdr *section;
+	struct ice_pkg_hdr *pkg_hdr;
+	enum ice_ddp_state state;
+	u16 i, size = 0, offset;
+	u32 reg = 0;
+	int status;
+	u8 flags;
+
+	if (!buf || !len)
+		return -EINVAL;
+
+	/* Does FW support new Tx topology mode ? */
+	if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
+		ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
+		return -EOPNOTSUPP;
+	}
+
+	current_topo = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN,
+				    GFP_KERNEL);
+	if (!current_topo)
+		return -ENOMEM;
+
+	/* get the current Tx topology */
+	status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
+				     &flags, false);
+	devm_kfree(ice_hw_to_dev(hw), current_topo);
+
+	if (status) {
+		ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
+		return status;
+	}
+
+	/* Is default topology already applied ? */
+	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+	    hw->num_tx_sched_layers == 9) {
+		ice_debug(hw, ICE_DBG_INIT, "Loaded default topology\n");
+		/* Already default topology is loaded */
+		return -EEXIST;
+	}
+
+	/* Is new topology already applied ? */
+	if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+	    hw->num_tx_sched_layers == 5) {
+		ice_debug(hw, ICE_DBG_INIT, "Loaded new topology\n");
+		/* Already new topology is loaded */
+		return -EEXIST;
+	}
+
+	/* Is set topology issued already ? */
+	if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
+		ice_debug(hw, ICE_DBG_INIT, "Update tx topology was done by another PF\n");
+		/* add a small delay before exiting */
+		for (i = 0; i < 20; i++)
+			msleep(100);
+		return -EEXIST;
+	}
+
+	/* Change the topology from new to default (5 to 9) */
+	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+	    hw->num_tx_sched_layers == 5) {
+		ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
+		goto update_topo;
+	}
+
+	pkg_hdr = (struct ice_pkg_hdr *)buf;
+	state = ice_verify_pkg(pkg_hdr, len);
+	if (state) {
+		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+			  state);
+		return -EIO;
+	}
+
+	/* find run time configuration segment */
+	seg = (struct ice_run_time_cfg_seg *)
+	      ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
+	if (!seg) {
+		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
+		return -EIO;
+	}
+
+	if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
+		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
+			  seg->buf_table.buf_count);
+		return -EIO;
+	}
+
+	section = ice_pkg_val_buf(seg->buf_table.buf_array);
+
+	if (!section || le32_to_cpu(section->section_entry[0].type) !=
+	    ICE_SID_TX_5_LAYER_TOPO) {
+		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
+		return -EIO;
+	}
+
+	size = le16_to_cpu(section->section_entry[0].size);
+	offset = le16_to_cpu(section->section_entry[0].offset);
+	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
+		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
+		return -EIO;
+	}
+
+	/* make sure the section fits in the buffer */
+	if (offset + size > ICE_PKG_BUF_SIZE) {
+		ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
+		return -EIO;
+	}
+
+	/* Get the new topology buffer */
+	new_topo = ((u8 *)section) + offset;
+
+update_topo:
+	/* acquire global lock to make sure that set topology is issued by
+	 * only one PF
+	 */
+	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
+				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+	if (status) {
+		ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
+		return status;
+	}
+
+	/* check reset was triggered already or not */
+	reg = rd32(hw, GLGEN_RSTAT);
+	if (reg & GLGEN_RSTAT_DEVSTATE_M) {
+		/* Reset is in progress, re-init the hw again */
+		ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. layer topology might be applied already\n");
+		ice_check_reset(hw);
+		return 0;
+	}
+
+	/* set new topology */
+	status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
+	if (status) {
+		ice_debug(hw, ICE_DBG_INIT, "Set tx topology is failed\n");
+		return status;
+	}
+
+	/* new topology is updated, delay 1 second before issuing the CORER */
+	for (i = 0; i < 10; i++)
+		msleep(100);
+	ice_reset(hw, ICE_RESET_CORER);
+	/* CORER will clear the global lock, so no explicit call
+	 * required for release
+	 */
+	return 0;
+}
diff --git a/drivers/thirdparty/ice/ice_ddp.h b/drivers/thirdparty/ice/ice_ddp.h
new file mode 100644
index 000000000000..922db1b00f5c
--- /dev/null
+++ b/drivers/thirdparty/ice/ice_ddp.h
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation.
*/ + +#ifndef _ICE_DDP_H_ +#define _ICE_DDP_H_ + +#include "ice_osdep.h" +#include "ice_adminq_cmd.h" +#include "ice_controlq.h" +#include "ice_flex_type.h" +#include "ice_protocol_type.h" + +/* Package minimal version supported */ +#define ICE_PKG_SUPP_VER_MAJ 1 +#define ICE_PKG_SUPP_VER_MNR 3 + +/* Package format version */ +#define ICE_PKG_FMT_VER_MAJ 1 +#define ICE_PKG_FMT_VER_MNR 0 +#define ICE_PKG_FMT_VER_UPD 0 +#define ICE_PKG_FMT_VER_DFT 0 + +#define ICE_PKG_CNT 4 + +enum ice_ddp_state { + /* Indicates that this call to ice_init_pkg + * successfully loaded the requested DDP package + */ + ICE_DDP_PKG_SUCCESS = 0, + + /* Generic error for already loaded errors, it is mapped later to + * the more specific one (one of the next 3) + */ + ICE_DDP_PKG_ALREADY_LOADED = -1, + + /* Indicates that a DDP package of the same version has already been + * loaded onto the device by a previous call or by another PF + */ + ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2, + + /* The device has a DDP package that is not supported by the driver */ + ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3, + + /* The device has a compatible package + * (but different from the request) already loaded + */ + ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4, + + /* The firmware loaded on the device is not compatible with + * the DDP package loaded + */ + ICE_DDP_PKG_FW_MISMATCH = -5, + + /* The DDP package file is invalid */ + ICE_DDP_PKG_INVALID_FILE = -6, + + /* The version of the DDP package provided is higher than + * the driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7, + + /* The version of the DDP package provided is lower than the + * driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8, + + /* Missing security manifest in DDP pkg */ + ICE_DDP_PKG_NO_SEC_MANIFEST = -9, + + /* The RSA signature of the DDP package file provided is invalid */ + ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -10, + + /* The DDP package file security revision is too low and not + * supported by firmware + */ + ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW = -11, + + /* Manifest hash mismatch */ + ICE_DDP_PKG_MANIFEST_INVALID = -12, + + /* Buffer hash mismatches manifest */ + ICE_DDP_PKG_BUFFER_INVALID = -13, + + /* Other errors */ + ICE_DDP_PKG_ERR = -14, +}; + +/* Package and segment headers and tables */ +struct ice_pkg_hdr { + struct ice_pkg_ver pkg_format_ver; + __le32 seg_count; + __le32 seg_offset[]; +}; + +/* Package signing algorithm types */ +#define SEGMENT_SIGN_TYPE_INVALID 0x00000000 +#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001 +#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002 +#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */ + +/* generic segment */ +struct ice_generic_seg_hdr { +#define SEGMENT_TYPE_INVALID 0x00000000 +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_ICE_E810 0x00000010 +#define SEGMENT_TYPE_SIGNING 0x00001001 +#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020 + __le32 seg_type; + struct ice_pkg_ver seg_format_ver; + __le32 seg_size; + char seg_id[ICE_PKG_NAME_SIZE]; +}; + +/* ice specific segment */ + +union ice_device_id { + struct { + __le16 device_id; + __le16 vendor_id; + } dev_vend_id; + __le32 id; +}; + +struct ice_device_id_entry { + union ice_device_id device; + union ice_device_id sub_device; +}; + +struct ice_seg { + struct ice_generic_seg_hdr hdr; + __le32 device_table_count; + struct ice_device_id_entry device_table[]; +}; + +struct ice_nvm_table { + __le32 table_count; + __le32 vers[]; +}; + +struct ice_buf { +#define ICE_PKG_BUF_SIZE 4096 + u8 
buf[ICE_PKG_BUF_SIZE]; +}; + +struct ice_buf_table { + __le32 buf_count; + struct ice_buf buf_array[]; +}; + +struct ice_run_time_cfg_seg { + struct ice_generic_seg_hdr hdr; + u8 rsvd[8]; + struct ice_buf_table buf_table; +}; + +/* global metadata specific segment */ +struct ice_global_metadata_seg { + struct ice_generic_seg_hdr hdr; + struct ice_pkg_ver pkg_ver; + __le32 rsvd; + char pkg_name[ICE_PKG_NAME_SIZE]; +}; + +#define ICE_MIN_S_OFF 12 +#define ICE_MAX_S_OFF 4095 +#define ICE_MIN_S_SZ 1 +#define ICE_MAX_S_SZ 4084 + +struct ice_sign_seg { + struct ice_generic_seg_hdr hdr; + __le32 seg_id; + __le32 sign_type; + __le32 signed_seg_idx; + __le32 signed_buf_start; + __le32 signed_buf_count; +#define ICE_SIGN_SEG_RESERVED_COUNT 44 + u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT]; + struct ice_buf_table buf_tbl; +}; + +/* section information */ +struct ice_section_entry { + __le32 type; + __le16 offset; + __le16 size; +}; + +#define ICE_MIN_S_COUNT 1 +#define ICE_MAX_S_COUNT 511 +#define ICE_MIN_S_DATA_END 12 +#define ICE_MAX_S_DATA_END 4096 + +#define ICE_METADATA_BUF 0x80000000 + +struct ice_buf_hdr { + __le16 section_count; + __le16 data_end; + struct ice_section_entry section_entry[]; +}; + +#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ + struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\ + (ent_sz)) + +/* ice package section IDs */ +#define ICE_SID_METADATA 1 +#define ICE_SID_XLT0_SW 10 +#define ICE_SID_XLT_KEY_BUILDER_SW 11 +#define ICE_SID_XLT1_SW 12 +#define ICE_SID_XLT2_SW 13 +#define ICE_SID_PROFID_TCAM_SW 14 +#define ICE_SID_PROFID_REDIR_SW 15 +#define ICE_SID_FLD_VEC_SW 16 +#define ICE_SID_CDID_KEY_BUILDER_SW 17 +#define ICE_SID_CDID_REDIR_SW 18 + +#define ICE_SID_XLT0_ACL 20 +#define ICE_SID_XLT_KEY_BUILDER_ACL 21 +#define ICE_SID_XLT1_ACL 22 +#define ICE_SID_XLT2_ACL 23 +#define ICE_SID_PROFID_TCAM_ACL 24 +#define ICE_SID_PROFID_REDIR_ACL 25 +#define ICE_SID_FLD_VEC_ACL 26 +#define ICE_SID_CDID_KEY_BUILDER_ACL 27 +#define ICE_SID_CDID_REDIR_ACL 28 + +#define ICE_SID_XLT0_FD 30 +#define ICE_SID_XLT_KEY_BUILDER_FD 31 +#define ICE_SID_XLT1_FD 32 +#define ICE_SID_XLT2_FD 33 +#define ICE_SID_PROFID_TCAM_FD 34 +#define ICE_SID_PROFID_REDIR_FD 35 +#define ICE_SID_FLD_VEC_FD 36 +#define ICE_SID_CDID_KEY_BUILDER_FD 37 +#define ICE_SID_CDID_REDIR_FD 38 + +#define ICE_SID_XLT0_RSS 40 +#define ICE_SID_XLT_KEY_BUILDER_RSS 41 +#define ICE_SID_XLT1_RSS 42 +#define ICE_SID_XLT2_RSS 43 +#define ICE_SID_PROFID_TCAM_RSS 44 +#define ICE_SID_PROFID_REDIR_RSS 45 +#define ICE_SID_FLD_VEC_RSS 46 +#define ICE_SID_CDID_KEY_BUILDER_RSS 47 +#define ICE_SID_CDID_REDIR_RSS 48 + +#define ICE_SID_RXPARSER_CAM 50 +#define ICE_SID_RXPARSER_NOMATCH_CAM 51 +#define ICE_SID_RXPARSER_IMEM 52 +#define ICE_SID_RXPARSER_XLT0_BUILDER 53 +#define ICE_SID_RXPARSER_NODE_PTYPE 54 +#define ICE_SID_RXPARSER_MARKER_PTYPE 55 +#define ICE_SID_RXPARSER_BOOST_TCAM 56 +#define ICE_SID_RXPARSER_PROTO_GRP 57 +#define ICE_SID_RXPARSER_METADATA_INIT 58 +#define ICE_SID_RXPARSER_XLT0 59 + +#define ICE_SID_TXPARSER_CAM 60 +#define ICE_SID_TXPARSER_NOMATCH_CAM 61 +#define ICE_SID_TXPARSER_IMEM 62 +#define ICE_SID_TXPARSER_XLT0_BUILDER 63 +#define ICE_SID_TXPARSER_NODE_PTYPE 64 +#define ICE_SID_TXPARSER_MARKER_PTYPE 65 +#define ICE_SID_TXPARSER_BOOST_TCAM 66 +#define ICE_SID_TXPARSER_PROTO_GRP 67 +#define ICE_SID_TXPARSER_METADATA_INIT 68 +#define ICE_SID_TXPARSER_XLT0 69 + +#define ICE_SID_RXPARSER_INIT_REDIR 70 +#define ICE_SID_TXPARSER_INIT_REDIR 71 +#define ICE_SID_RXPARSER_MARKER_GRP 72 +#define 
ICE_SID_TXPARSER_MARKER_GRP 73 +#define ICE_SID_RXPARSER_LAST_PROTO 74 +#define ICE_SID_TXPARSER_LAST_PROTO 75 +#define ICE_SID_RXPARSER_PG_SPILL 76 +#define ICE_SID_TXPARSER_PG_SPILL 77 +#define ICE_SID_RXPARSER_NOMATCH_SPILL 78 +#define ICE_SID_TXPARSER_NOMATCH_SPILL 79 + +#define ICE_SID_XLT0_PE 80 +#define ICE_SID_XLT_KEY_BUILDER_PE 81 +#define ICE_SID_XLT1_PE 82 +#define ICE_SID_XLT2_PE 83 +#define ICE_SID_PROFID_TCAM_PE 84 +#define ICE_SID_PROFID_REDIR_PE 85 +#define ICE_SID_FLD_VEC_PE 86 +#define ICE_SID_CDID_KEY_BUILDER_PE 87 +#define ICE_SID_CDID_REDIR_PE 88 + +#define ICE_SID_RXPARSER_FLAG_REDIR 97 + +/* Label Metadata section IDs */ +#define ICE_SID_LBL_FIRST 0x80000010 +#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010 +#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011 +#define ICE_SID_LBL_RESERVED_12 0x80000012 +#define ICE_SID_LBL_RESERVED_13 0x80000013 +#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014 +#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015 +#define ICE_SID_LBL_PTYPE 0x80000016 +#define ICE_SID_LBL_PROTOCOL_ID 0x80000017 +#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 +#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019 +#define ICE_SID_LBL_RXPARSER_PG 0x8000001A +#define ICE_SID_LBL_TXPARSER_PG 0x8000001B +#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C +#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D +#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E +#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F +#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020 +#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021 +#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022 +#define ICE_SID_LBL_FLAG 0x80000023 +#define ICE_SID_LBL_REG 0x80000024 +#define ICE_SID_LBL_SW_PTG 0x80000025 +#define ICE_SID_LBL_ACL_PTG 0x80000026 +#define ICE_SID_LBL_PE_PTG 0x80000027 +#define ICE_SID_LBL_RSS_PTG 0x80000028 +#define ICE_SID_LBL_FD_PTG 0x80000029 +#define ICE_SID_LBL_SW_VSIG 0x8000002A +#define ICE_SID_LBL_ACL_VSIG 0x8000002B +#define ICE_SID_LBL_PE_VSIG 0x8000002C +#define ICE_SID_LBL_RSS_VSIG 0x8000002D +#define ICE_SID_LBL_FD_VSIG 0x8000002E +#define ICE_SID_LBL_PTYPE_META 0x8000002F +#define ICE_SID_LBL_SW_PROFID 0x80000030 +#define ICE_SID_LBL_ACL_PROFID 0x80000031 +#define ICE_SID_LBL_PE_PROFID 0x80000032 +#define ICE_SID_LBL_RSS_PROFID 0x80000033 +#define ICE_SID_LBL_FD_PROFID 0x80000034 +#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035 +#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036 +#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037 +#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038 +/* The following define MUST be updated to reflect the last label section ID */ +#define ICE_SID_LBL_LAST 0x80000038 + +/* Label ICE runtime configuration section IDs */ +#define ICE_SID_TX_5_LAYER_TOPO 0x10 + +enum ice_block { + ICE_BLK_SW = 0, + ICE_BLK_ACL, + ICE_BLK_FD, + ICE_BLK_RSS, + ICE_BLK_PE, + ICE_BLK_COUNT +}; + +enum ice_sect { + ICE_XLT0 = 0, + ICE_XLT_KB, + ICE_XLT1, + ICE_XLT2, + ICE_PROF_TCAM, + ICE_PROF_REDIR, + ICE_VEC_TBL, + ICE_CDID_KB, + ICE_CDID_REDIR, + ICE_SECT_COUNT +}; + +/* package buffer building */ + +struct ice_buf_build { + struct ice_buf buf; + u16 reserved_section_table_entries; +}; + +struct ice_pkg_enum { + struct ice_buf_table *buf_table; + u32 buf_idx; + + u32 type; + struct ice_buf_hdr *buf; + u32 sect_idx; + void *sect; + u32 sect_type; + + u32 entry_idx; + void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); +}; + +/* package Marker PType TCAM entry */ +struct ice_marker_ptype_tcam_entry { +#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024 + __le16 addr; + __le16 ptype; + u8 keys[20]; +}; + +struct 
ice_marker_ptype_tcam_section { + __le16 count; + __le16 reserved; + struct ice_marker_ptype_tcam_entry tcam[]; +}; + +#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \ + sizeof(struct ice_marker_ptype_tcam_entry), \ + sizeof(struct ice_marker_ptype_tcam_entry)) + +struct ice_hw; + +int +ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); +void ice_release_change_lock(struct ice_hw *hw); + +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw); +void * +ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size); +int +ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count); +int +ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, + unsigned long *bm, struct list_head *fv_list); +int +ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); +u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld); +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); + +int +ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); +int +ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count); +void ice_release_global_cfg_lock(struct ice_hw *hw); +struct ice_generic_seg_hdr * +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr); +enum ice_ddp_state +ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len); +enum ice_ddp_state +ice_get_pkg_info(struct ice_hw *hw); +void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg); +struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg); +int +ice_acquire_global_cfg_lock(struct ice_hw *hw, + enum ice_aq_res_access_type access); + +struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg); +struct ice_buf_hdr * +ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state); +bool +ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state); +void * +ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type, u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)); +void * +ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type); +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); +enum ice_ddp_state +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); +bool ice_is_init_pkg_successful(enum ice_ddp_state state); +void ice_free_seg(struct ice_hw *hw); + +struct ice_buf_build * +ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, + void **section); +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld); +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld); + +int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len); + +#endif /* _ICE_DDP_H_ */ diff --git a/drivers/thirdparty/ice/ice_debugfs.c b/drivers/thirdparty/ice/ice_debugfs.c index 09b403d5ff48..f83bbf0d45b1 100644 --- a/drivers/thirdparty/ice/ice_debugfs.c +++ b/drivers/thirdparty/ice/ice_debugfs.c @@ -8,10 +8,8 @@ #include "ice_lib.h" #include "ice_fltr.h" - static struct dentry *ice_debugfs_root; - static void ice_dump_pf(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); @@ -27,10 +25,10 @@ static void ice_dump_pf(struct ice_pf *pf) dev_info(dev, "\tnum_lan_msix = %d\n", pf->num_lan_msix); dev_info(dev, "\tnum_rdma_msix = %d\n", pf->num_rdma_msix); dev_info(dev, "\trdma_base_vector = %d\n", pf->rdma_base_vector); -#ifdef HAVE_NETDEV_SB_DEV +#ifdef 
HAVE_NDO_DFWD_OPS dev_info(dev, "\tnum_macvlan = %d\n", pf->num_macvlan); dev_info(dev, "\tmax_num_macvlan = %d\n", pf->max_num_macvlan); -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ dev_info(dev, "\tirq_tracker->num_entries = %d\n", pf->irq_tracker->num_entries); dev_info(dev, "\tirq_tracker->end = %d\n", pf->irq_tracker->end); @@ -38,9 +36,9 @@ static void ice_dump_pf(struct ice_pf *pf) ice_get_valid_res_count(pf->irq_tracker)); dev_info(dev, "\tnum_avail_sw_msix = %d\n", pf->num_avail_sw_msix); dev_info(dev, "\tsriov_base_vector = %d\n", pf->sriov_base_vector); - dev_info(dev, "\tnum_alloc_vfs = %d\n", pf->num_alloc_vfs); - dev_info(dev, "\tnum_qps_per_vf = %d\n", pf->num_qps_per_vf); - dev_info(dev, "\tnum_msix_per_vf = %d\n", pf->num_msix_per_vf); + dev_info(dev, "\tnum_alloc_vfs = %d\n", ice_get_num_vfs(pf)); + dev_info(dev, "\tnum_qps_per_vf = %d\n", pf->vfs.num_qps_per); + dev_info(dev, "\tnum_msix_per_vf = %d\n", pf->vfs.num_msix_per); } static void ice_dump_pf_vsi_list(struct ice_pf *pf) @@ -55,13 +53,13 @@ static void ice_dump_pf_vsi_list(struct ice_pf *pf) continue; dev_info(dev, "vsi[%d]:\n", i); - dev_info(dev, "\tvsi = %pK\n", vsi); + dev_info(dev, "\tvsi = %p\n", vsi); dev_info(dev, "\tvsi_num = %d\n", vsi->vsi_num); dev_info(dev, "\ttype = %s\n", ice_vsi_type_str(vsi->type)); if (vsi->type == ICE_VSI_VF) - dev_info(dev, "\tvf_id = %d\n", vsi->vf_id); - dev_info(dev, "\tback = %pK\n", vsi->back); - dev_info(dev, "\tnetdev = %pK\n", vsi->netdev); + dev_info(dev, "\tvf_id = %d\n", vsi->vf->vf_id); + dev_info(dev, "\tback = %p\n", vsi->back); + dev_info(dev, "\tnetdev = %p\n", vsi->netdev); dev_info(dev, "\tmax_frame = %d\n", vsi->max_frame); dev_info(dev, "\trx_buf_len = %d\n", vsi->rx_buf_len); dev_info(dev, "\tnum_txq = %d\n", vsi->num_txq); @@ -124,6 +122,61 @@ static void ice_dump_pf_fdir(struct ice_pf *pf) hw->func_caps.fd_fltr_best_effort); } +/** + * ice_dump_rclk_status - print the PHY recovered clock status + * @pf: pointer to PF + * + * Print the PHY's recovered clock pin status. + */ +static void ice_dump_rclk_status(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + u8 phy, phy_pin, pin; + int phy_pins; + + if (ice_is_e810(&pf->hw)) + phy_pins = ICE_C827_RCLK_PINS_NUM; + else + /* E822-based devices have only one RCLK pin */ + phy_pins = E822_CGU_RCLK_PHY_PINS_NUM; + + for (phy_pin = 0; phy_pin < phy_pins; phy_pin++) { + const char *pin_name, *pin_state; + u8 port_num, flags; + u32 freq; + + port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT; + if (ice_aq_get_phy_rec_clk_out(&pf->hw, phy_pin, &port_num, + &flags, &freq)) + return; + + if (ice_is_e810(&pf->hw)) { + int status = ice_get_pf_c827_idx(&pf->hw, &phy); + + if (status) { + dev_err(dev, + "Could not find PF C827 PHY, status=%d\n", + status); + return; + } + + pin = E810T_CGU_INPUT_C827(phy, phy_pin); + pin_name = ice_zl_pin_idx_to_name_e810t(pin); + } else { + /* e822-based devices for now have only one phy + * available (from Rimmon) and only one DPLL RCLK input + * pin + */ + pin_name = E822_CGU_RCLK_PIN_NAME; + } + pin_state = + flags & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN ? 
+ "Enabled" : "Disabled"; + + dev_info(dev, "State for pin %s: %s\n", pin_name, pin_state); + } +} + /** * ice_vsi_dump_ctxt - print the passed in VSI context structure * @dev: Device used for dev_info prints @@ -216,6 +269,213 @@ static void ice_vsi_dump_ctxt(struct device *dev, struct ice_vsi_ctx *ctxt) "enabled" : "disabled"); } +#define ICE_E810T_NEVER_USE_PIN 0xff +#define ZL_VER_MAJOR_SHIFT 24 +#define ZL_VER_MAJOR_MASK ICE_M(0xff, ZL_VER_MAJOR_SHIFT) +#define ZL_VER_MINOR_SHIFT 16 +#define ZL_VER_MINOR_MASK ICE_M(0xff, ZL_VER_MINOR_SHIFT) +#define ZL_VER_REV_SHIFT 8 +#define ZL_VER_REV_MASK ICE_M(0xff, ZL_VER_REV_SHIFT) +#define ZL_VER_BF_SHIFT 0 +#define ZL_VER_BF_MASK ICE_M(0xff, ZL_VER_BF_SHIFT) + +/** + * ice_get_dpll_status - get the detailed state of the clock generator + * @pf: pointer to PF + * @buff: buffer for the state to be printed + * @buff_size: size of the buffer + * + * This function reads current status of the ZL CGU and prints it to the buffer + * buff_size will be updated to reflect the number of bytes written to the + * buffer + * + * Return: 0 on success, error code otherwise + */ +static int +ice_get_dpll_status(struct ice_pf *pf, char *buff, size_t *buff_size) +{ + u8 pin, synce_prio, ptp_prio, ver_major, ver_minor, rev, bugfix; + struct ice_aqc_get_cgu_abilities abilities = {0}; + struct ice_aqc_get_cgu_input_config cfg = {0}; + struct device *dev = ice_pf_to_dev(pf); + u32 cgu_id, cgu_cfg_ver, cgu_fw_ver; + size_t bytes_left = *buff_size; + struct ice_hw *hw = &pf->hw; + char pin_name[MAX_PIN_NAME]; + int cnt = 0; + int status; + + if (!ice_is_cgu_present(hw)) { + dev_err(dev, "CGU not present\n"); + return -ENODEV; + } + + memset(&abilities, 0, sizeof(struct ice_aqc_get_cgu_abilities)); + status = ice_aq_get_cgu_abilities(hw, &abilities); + if (status) { + dev_err(dev, + "Failed to read CGU caps, status: %d, Error: 0x%02X\n", + status, hw->adminq.sq_last_status); + abilities.num_inputs = 7; + abilities.pps_dpll_idx = 1; + abilities.synce_dpll_idx = 0; + } + + status = ice_aq_get_cgu_info(hw, &cgu_id, &cgu_cfg_ver, &cgu_fw_ver); + if (status) + return status; + + if (abilities.cgu_part_num == + ICE_ACQ_GET_LINK_TOPO_NODE_NR_ZL30632_80032) { + cnt = snprintf(buff, bytes_left, "Found ZL80032 CGU\n"); + + /* Read DPLL config version from AQ */ + ver_major = (cgu_cfg_ver & ZL_VER_MAJOR_MASK) + >> ZL_VER_MAJOR_SHIFT; + ver_minor = (cgu_cfg_ver & ZL_VER_MINOR_MASK) + >> ZL_VER_MINOR_SHIFT; + rev = (cgu_cfg_ver & ZL_VER_REV_MASK) >> ZL_VER_REV_SHIFT; + bugfix = (cgu_cfg_ver & ZL_VER_BF_MASK) >> ZL_VER_BF_SHIFT; + + cnt += snprintf(&buff[cnt], bytes_left - cnt, + "DPLL Config ver: %d.%d.%d.%d\n", ver_major, + ver_minor, rev, bugfix); + } else if (abilities.cgu_part_num == + ICE_ACQ_GET_LINK_TOPO_NODE_NR_SI5383_5384) { + cnt = snprintf(buff, bytes_left, "Found SI5383/5384 CGU\n"); + } + + cnt += snprintf(&buff[cnt], bytes_left - cnt, "\nCGU Input status:\n"); + cnt += snprintf(&buff[cnt], bytes_left - cnt, + " | | priority | |\n" + " input (idx) | state | EEC (%d) | PPS (%d) | ESync fail |\n", + abilities.synce_dpll_idx, abilities.pps_dpll_idx); + cnt += snprintf(&buff[cnt], bytes_left - cnt, + " ----------------------------------------------------------------\n"); + + for (pin = 0; pin < abilities.num_inputs; pin++) { + u8 esync_fail = 0; + u8 esync_en = 0; + char *pin_state; + u8 data; + + status = ice_aq_get_input_pin_cfg(hw, &cfg, pin); + if (status) + data = ICE_CGU_IN_PIN_FAIL_FLAGS; + else + data = (cfg.status & ICE_CGU_IN_PIN_FAIL_FLAGS); + + /* get either 
e810t pin names or generic ones */ + ice_dpll_pin_idx_to_name(pf, pin, pin_name); + + /* get pin priorities */ + if (ice_aq_get_cgu_ref_prio(hw, abilities.synce_dpll_idx, pin, + &synce_prio)) + synce_prio = ICE_E810T_NEVER_USE_PIN; + if (ice_aq_get_cgu_ref_prio(hw, abilities.pps_dpll_idx, pin, + &ptp_prio)) + ptp_prio = ICE_E810T_NEVER_USE_PIN; + + /* if all flags are set, the pin is invalid */ + if (data == ICE_CGU_IN_PIN_FAIL_FLAGS) { + pin_state = ICE_DPLL_PIN_STATE_INVALID; + /* if some flags are set, the pin is validating */ + } else if (data) { + pin_state = ICE_DPLL_PIN_STATE_VALIDATING; + /* if all flags are cleared, the pin is valid */ + } else { + pin_state = ICE_DPLL_PIN_STATE_VALID; + esync_en = !!(cfg.flags2 & + ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN); + esync_fail = !!(cfg.status & + ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL); + } + + cnt += snprintf(&buff[cnt], bytes_left - cnt, + " %12s (%d) | %10s | %3d | %3d | %4s |\n", + pin_name, pin, pin_state, synce_prio, ptp_prio, + esync_en ? esync_fail ? + "true" : "false" : "N/A"); + } + + if (!test_bit(ICE_FLAG_DPLL_MONITOR, pf->flags)) { + cnt += snprintf(&buff[cnt], bytes_left - cnt, + "\nDPLL Monitoring disabled\n"); + } else { + /* SYNCE DPLL status */ + ice_dpll_pin_idx_to_name(pf, pf->synce_ref_pin, pin_name); + cnt += snprintf(&buff[cnt], bytes_left - cnt, "\nEEC DPLL:\n"); + cnt += snprintf(&buff[cnt], bytes_left - cnt, + "\tCurrent reference:\t%s\n", pin_name); + + cnt += snprintf(&buff[cnt], bytes_left - cnt, + "\tStatus:\t\t\t%s\n", + ice_cgu_state_to_name(pf->synce_dpll_state)); + + ice_dpll_pin_idx_to_name(pf, pf->ptp_ref_pin, pin_name); + cnt += snprintf(&buff[cnt], bytes_left - cnt, "\nPPS DPLL:\n"); + cnt += snprintf(&buff[cnt], bytes_left - cnt, + "\tCurrent reference:\t%s\n", pin_name); + cnt += snprintf(&buff[cnt], bytes_left - cnt, + "\tStatus:\t\t\t%s\n", + ice_cgu_state_to_name(pf->ptp_dpll_state)); + + if (pf->ptp_dpll_state != ICE_CGU_STATE_INVALID) + cnt += snprintf(&buff[cnt], bytes_left - cnt, + "\tPhase offset [ns]:\t\t\t%lld\n", + pf->ptp_dpll_phase_offset); + } + + *buff_size = cnt; + return 0; +} + +/** + * ice_debugfs_cgu_read - debugfs interface for reading DPLL status + * @filp: the opened file + * @user_buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + * + * Return: number of bytes read + */ +static ssize_t ice_debugfs_cgu_read(struct file *filp, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + size_t buffer_size = PAGE_SIZE; + char *kbuff; + int err; + + if (*ppos != 0) + return 0; + + kbuff = (char *)get_zeroed_page(GFP_KERNEL); + if (!kbuff) + return -ENOMEM; + + err = ice_get_dpll_status(pf, kbuff, &buffer_size); + + if (err) { + err = -EIO; + goto err; + } + + err = simple_read_from_buffer(user_buf, count, ppos, kbuff, + buffer_size); + +err: + free_page((unsigned long)kbuff); + return err; +} + +static const struct file_operations ice_debugfs_cgu_fops = { + .owner = THIS_MODULE, + .llseek = default_llseek, + .open = simple_open, + .read = ice_debugfs_cgu_read, +}; + static const char *module_id_to_name(u16 module_id) { switch (module_id) { @@ -314,7 +574,7 @@ static void ice_fwlog_dump_cfg(struct ice_hw *hw) { struct device *dev = ice_pf_to_dev((struct ice_pf *)(hw->back)); struct ice_fwlog_cfg *cfg; - enum ice_status status; + int status; u16 i; cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); @@ -409,7 +669,6 @@ ice_debugfs_command_write(struct file *filp, const char __user *buf, } ret = 
ice_aq_get_vsi_params(hw, vsi_ctx, NULL); if (ret) { - ret = -EINVAL; devm_kfree(dev, vsi_ctx); goto command_help; } @@ -496,6 +755,9 @@ ice_debugfs_command_write(struct file *filp, const char __user *buf, pf->globr_count); dev_info(dev, "emp reset count: %d\n", pf->empr_count); dev_info(dev, "pf reset count: %d\n", pf->pfr_count); + } else if ((!strncmp(argv[1], "rclk_status", 11))) { + if (ice_is_feature_supported(pf, ICE_F_PHY_RCLK)) + ice_dump_rclk_status(pf); } #ifdef CONFIG_DCB @@ -562,12 +824,8 @@ ice_debugfs_command_write(struct file *filp, const char __user *buf, if (ret) goto command_help; - ice_cgu_cfg_ts_pll(pf, false, (enum ice_time_ref_freq)time_ref_freq, - (enum ice_cgu_time_ref_sel)time_ref_sel, - (enum ice_src_tmr_mode)src_tmr_mode); - ice_cgu_cfg_ts_pll(pf, true, (enum ice_time_ref_freq)time_ref_freq, - (enum ice_cgu_time_ref_sel)time_ref_sel, - (enum ice_src_tmr_mode)src_tmr_mode); + ice_cfg_cgu_pll_e822(hw, time_ref_freq, time_ref_sel); + ice_ptp_update_incval(pf, time_ref_freq, src_tmr_mode); } else { command_help: dev_info(dev, "unknown or invalid command '%s'\n", cmd_buf); @@ -597,6 +855,8 @@ command_help: #ifdef ICE_ADD_PROBES dev_info(dev, "\t dump arfs_stats\n"); #endif /* ICE_ADD_PROBES */ + if (ice_is_feature_supported(pf, ICE_F_PHY_RCLK)) + dev_info(dev, "\t dump rclk_status\n"); ret = -EINVAL; goto command_write_done; } @@ -635,6 +895,13 @@ void ice_debugfs_pf_init(struct ice_pf *pf) if (!pfile) goto create_failed; + /* Expose external CGU debugfs interface if CGU available*/ + if (ice_is_feature_supported(pf, ICE_F_CGU)) { + if (!debugfs_create_file("cgu", 0400, pf->ice_debugfs_pf, pf, + &ice_debugfs_cgu_fops)) + goto create_failed; + } + return; create_failed: diff --git a/drivers/thirdparty/ice/ice_defs.h b/drivers/thirdparty/ice/ice_defs.h new file mode 100644 index 000000000000..6eae880cb1e2 --- /dev/null +++ b/drivers/thirdparty/ice/ice_defs.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _ICE_DEFS_H_ +#define _ICE_DEFS_H_ + +#define ICE_BYTES_PER_WORD 2 +#define ICE_BYTES_PER_DWORD 4 +#define ICE_MAX_TRAFFIC_CLASS 8 +#define ICE_CHNL_MAX_TC 16 + +#endif /* _ICE_DEFS_H_ */ diff --git a/drivers/thirdparty/ice/ice_devids.h b/drivers/thirdparty/ice/ice_devids.h index a9c1d294def5..1e2f702e3492 100644 --- a/drivers/thirdparty/ice/ice_devids.h +++ b/drivers/thirdparty/ice/ice_devids.h @@ -4,8 +4,8 @@ #ifndef _ICE_DEVIDS_H_ #define _ICE_DEVIDS_H_ - /* Device IDs */ +#define ICE_DEV_ID_E822_SI_DFLT 0x1888 /* Intel(R) Ethernet Connection E823-L for backplane */ #define ICE_DEV_ID_E823L_BACKPLANE 0x124C /* Intel(R) Ethernet Connection E823-L for SFP */ @@ -22,6 +22,13 @@ #define ICE_DEV_ID_E810C_QSFP 0x1592 /* Intel(R) Ethernet Controller E810-C for SFP */ #define ICE_DEV_ID_E810C_SFP 0x1593 +#define ICE_SUBDEV_ID_E810T 0x000E +#define ICE_SUBDEV_ID_E810T2 0x000F +#define ICE_SUBDEV_ID_E810T3 0x02E9 +#define ICE_SUBDEV_ID_E810T4 0x02EA +#define ICE_SUBDEV_ID_E810T5 0x0010 +#define ICE_SUBDEV_ID_E810T6 0x0012 +#define ICE_SUBDEV_ID_E810T7 0x0011 /* Intel(R) Ethernet Controller E810-XXV for backplane */ #define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599 /* Intel(R) Ethernet Controller E810-XXV for QSFP */ @@ -56,5 +63,12 @@ #define ICE_DEV_ID_E822L_10G_BASE_T 0x1899 /* Intel(R) Ethernet Connection E822-L 1GbE */ #define ICE_DEV_ID_E822L_SGMII 0x189A - +/* Intel(R) Ethernet Connection E825-C for backplane */ +#define ICE_DEV_ID_E825C_BACKPLANE 0x579C +/* Intel(R) Ethernet Connection E825-C for QSFP */ +#define ICE_DEV_ID_E825C_QSFP 0x579D +/* Intel(R) Ethernet Connection E825-C for SFP */ +#define ICE_DEV_ID_E825C_SFP 0x579E +/* Intel(R) Ethernet Connection E825-C 1GbE */ +#define ICE_DEV_ID_E825C_SGMII 0x579F #endif /* _ICE_DEVIDS_H_ */ diff --git a/drivers/thirdparty/ice/ice_devlink.c b/drivers/thirdparty/ice/ice_devlink.c index 8b7fcbc0a32d..237c92734ebd 100644 --- a/drivers/thirdparty/ice/ice_devlink.c +++ b/drivers/thirdparty/ice/ice_devlink.c @@ -17,15 +17,14 @@ struct ice_info_ctx { struct ice_hw_dev_caps dev_caps; }; -/* - * The following functions are used to format specific strings for various +/* The following functions are used to format specific strings for various * devlink info versions. The ctx parameter is used to provide the storage * buffer, as well as any ancillary information calculated when the info * request was made. * - * If a version does not exist, for example a "stored" version that does not - * exist because no update is pending, the function should leave the buffer in - * the ctx structure empty and return 0. + * If a version does not exist, for example when attempting to get the + * inactive version of flash when there is no pending update, the function + * should leave the buffer in the ctx structure empty. 
*/ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx) @@ -41,28 +40,29 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx) static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; - enum ice_status status; + int status; status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf)); if (status) /* We failed to locate the PBA, so just skip this entry */ - dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n", - ice_stat_str(status)); + dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n", + status); } static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver, - hw->fw_patch); + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", + hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch); } static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver); + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver, + hw->api_min_ver, hw->api_patch); } static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx) @@ -79,7 +79,9 @@ static void ice_info_fw_srev(struct ice_pf *pf, struct ice_info_ctx *ctx) snprintf(ctx->buf, sizeof(ctx->buf), "%u", nvm->srev); } -static void ice_info_pending_fw_srev(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_fw_srev(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &ctx->pending_nvm; @@ -91,10 +93,13 @@ static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_orom_info *orom = &pf->hw.flash.orom; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch); + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", + orom->major, orom->build, orom->patch); } -static void ice_info_pending_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_orom_info *orom = &ctx->pending_orom; @@ -110,7 +115,9 @@ static void ice_info_orom_srev(struct ice_pf *pf, struct ice_info_ctx *ctx) snprintf(ctx->buf, sizeof(ctx->buf), "%u", orom->srev); } -static void ice_info_pending_orom_srev(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_orom_srev(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_orom_info *orom = &ctx->pending_orom; @@ -125,12 +132,15 @@ static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor); } -static void ice_info_pending_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &ctx->pending_nvm; if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) - snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor); + snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", + nvm->major, nvm->minor); } static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) @@ -140,7 +150,9 @@ static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack); } -static void 
ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_eetrack(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &ctx->pending_nvm; @@ -155,15 +167,17 @@ static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx) snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name); } -static void ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update, - pkg->draft); + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", + pkg->major, pkg->minor, pkg->update, pkg->draft); } -static void ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx) { snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id); } @@ -172,10 +186,11 @@ static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &pf->hw.flash.netlist; - /* The netlist versions are BCD formatted */ - snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor, - netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev, - netlist->cust_ver); + /* The netlist version fields are BCD formatted */ + snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", + netlist->major, netlist->minor, + netlist->type >> 16, netlist->type & 0xFFFF, + netlist->rev, netlist->cust_ver); } static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx) @@ -185,19 +200,23 @@ static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx) snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); } -static void ice_info_pending_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &ctx->pending_netlist; - /* The netlist versions are BCD formatted */ + /* The netlist version fields are BCD formatted */ if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor, - netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev, - netlist->cust_ver); + netlist->type >> 16, netlist->type & 0xFFFF, + netlist->rev, netlist->cust_ver); } -static void ice_info_pending_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &ctx->pending_netlist; @@ -205,9 +224,24 @@ static void ice_info_pending_netlist_build(struct ice_pf *pf, struct ice_info_ct snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); } -#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter } -#define running(key, getter) { ICE_VERSION_RUNNING, key, getter } -#define stored(key, getter) { ICE_VERSION_STORED, key, getter } +#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL } +#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL } +#define stored(key, getter, fallback) \ + { ICE_VERSION_STORED, key, getter, fallback } + +/* The combined() macro inserts both the running entry as well as a stored + * entry. 
The running entry will always report the version from the active + * handler. The stored entry will first try the pending handler, and fallback + * to the active handler if the pending function does not report a version. + * The pending handler should check the status of a pending update for the + * relevant flash component. It should only fill in the buffer in the case + * where a valid pending version is available. This ensures that the related + * stored and running versions remain in sync, and that stored versions are + * correctly reported as expected. + */ +#define combined(key, active, pending) \ + running(key, active), \ + stored(key, pending, active) enum ice_version_type { ICE_VERSION_FIXED, @@ -219,28 +253,27 @@ static const struct ice_devlink_version { enum ice_version_type type; const char *key; void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx); + void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx); } ice_devlink_versions[] = { fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba), running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt), running("fw.mgmt.api", ice_info_fw_api), running("fw.mgmt.build", ice_info_fw_build), - running("fw.mgmt.srev", ice_info_fw_srev), - stored("fw.mgmt.srev", ice_info_pending_fw_srev), - running(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver), - stored(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_pending_orom_ver), - running("fw.undi.srev", ice_info_orom_srev), - stored("fw.undi.srev", ice_info_pending_orom_srev), - running("fw.psid.api", ice_info_nvm_ver), - stored("fw.psid.api", ice_info_pending_nvm_ver), - running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack), - stored(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_pending_eetrack), + combined("fw.mgmt.srev", ice_info_fw_srev, ice_info_pending_fw_srev), + combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, + ice_info_orom_ver, ice_info_pending_orom_ver), + combined("fw.undi.srev", ice_info_orom_srev, + ice_info_pending_orom_srev), + combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver), + combined(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, + ice_info_eetrack, ice_info_pending_eetrack), running("fw.app.name", ice_info_ddp_pkg_name), running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version), running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id), - running("fw.netlist", ice_info_netlist_ver), - stored("fw.netlist", ice_info_pending_netlist_ver), - running("fw.netlist.build", ice_info_netlist_build), - stored("fw.netlist.build", ice_info_pending_netlist_build), + combined("fw.netlist", ice_info_netlist_ver, + ice_info_pending_netlist_ver), + combined("fw.netlist.build", ice_info_netlist_build, + ice_info_pending_netlist_build), }; /** @@ -262,10 +295,14 @@ static int ice_devlink_info_get(struct devlink *devlink, struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; struct ice_info_ctx *ctx; - enum ice_status status; size_t i; int err; + if (ice_get_fw_mode(hw) == ICE_FW_MODE_REC) { + NL_SET_ERR_MSG_MOD(extack, "Device firmware is in recovery mode. 
Unable to collect version info."); + return -EOPNOTSUPP; + } + err = ice_wait_for_reset(pf, 10 * HZ); if (err) { NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting"); @@ -277,20 +314,19 @@ static int ice_devlink_info_get(struct devlink *devlink, return -ENOMEM; /* discover capabilities first */ - status = ice_discover_dev_caps(hw, &ctx->dev_caps); - if (status) { - dev_dbg(dev, "Failed to discover device capabilities, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_discover_dev_caps(hw, &ctx->dev_caps); + if (err) { + dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities"); - err = -EIO; goto out_free_ctx; } if (ctx->dev_caps.common_cap.nvm_update_pending_orom) { - status = ice_get_inactive_orom_ver(hw, &ctx->pending_orom); - if (status) { - dev_dbg(dev, "Unable to read inactive Option ROM version data, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom); + if (err) { + dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); /* disable display of pending Option ROM */ ctx->dev_caps.common_cap.nvm_update_pending_orom = false; @@ -298,10 +334,10 @@ static int ice_devlink_info_get(struct devlink *devlink, } if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) { - status = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm); - if (status) { - dev_dbg(dev, "Unable to read inactive NVM version data, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm); + if (err) { + dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); /* disable display of pending Option ROM */ ctx->dev_caps.common_cap.nvm_update_pending_nvm = false; @@ -309,10 +345,10 @@ static int ice_devlink_info_get(struct devlink *devlink, } if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) { - status = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist); - if (status) { - dev_dbg(dev, "Unable to read inactive Netlist version data, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist); + if (err) { + dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); /* disable display of pending Option ROM */ ctx->dev_caps.common_cap.nvm_update_pending_netlist = false; @@ -341,6 +377,14 @@ static int ice_devlink_info_get(struct devlink *devlink, ice_devlink_versions[i].getter(pf, ctx); + /* If the default getter doesn't report a version, use the + * fallback function. This is primarily useful in the case of + * "stored" versions that want to report the same value as the + * running version in the normal case of no pending update. 
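A minimal standalone illustration of this getter/fallback dispatch, in plain C with simplified, hypothetical types rather than the driver's own structures:

#include <stdio.h>

struct info_ctx { char buf[32]; int pending; };

/* "running" getter: always reports the active version */
static void get_active(struct info_ctx *ctx)
{
	snprintf(ctx->buf, sizeof(ctx->buf), "1.2.3");
}

/* "stored" getter: only reports when an update is actually pending */
static void get_pending(struct info_ctx *ctx)
{
	if (ctx->pending)
		snprintf(ctx->buf, sizeof(ctx->buf), "1.2.4");
}

struct version_entry {
	const char *key;
	void (*getter)(struct info_ctx *ctx);
	void (*fallback)(struct info_ctx *ctx);	/* may be NULL */
};

int main(void)
{
	struct version_entry stored = { "fw.psid.api", get_pending, get_active };
	struct info_ctx ctx = { .buf = "", .pending = 0 };

	stored.getter(&ctx);
	/* nothing pending, so the stored entry reuses the running version */
	if (ctx.buf[0] == '\0' && stored.fallback)
		stored.fallback(&ctx);

	printf("%s = %s\n", stored.key, ctx.buf);	/* "fw.psid.api = 1.2.3" */
	return 0;
}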
+ */ + if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) + ice_devlink_versions[i].fallback(pf, ctx); + /* Do not report missing versions */ if (ctx->buf[0] == '\0') continue; @@ -381,6 +425,7 @@ enum ice_devlink_param_id { ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, ICE_DEVLINK_PARAM_ID_FW_MGMT_MINSREV, ICE_DEVLINK_PARAM_ID_FW_UNDI_MINSREV, + ICE_DEVLINK_PARAM_ID_TX_BALANCE, }; /** @@ -397,7 +442,7 @@ ice_devlink_minsrev_get(struct devlink *devlink, u32 id, struct devlink_param_gs struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_minsrev_info minsrevs = {}; - enum ice_status status; + int status; if (id != ICE_DEVLINK_PARAM_ID_FW_MGMT_MINSREV && id != ICE_DEVLINK_PARAM_ID_FW_UNDI_MINSREV) @@ -452,7 +497,11 @@ ice_devlink_minsrev_set(struct devlink *devlink, u32 id, struct devlink_param_gs struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_minsrev_info minsrevs = {}; - enum ice_status status; + struct ice_rq_event_info event; + u16 completion_retval; + int err; + + memset(&event, 0, sizeof(event)); switch (id) { case ICE_DEVLINK_PARAM_ID_FW_MGMT_MINSREV: @@ -467,12 +516,26 @@ ice_devlink_minsrev_set(struct devlink *devlink, u32 id, struct devlink_param_gs return -EINVAL; } - status = ice_update_nvm_minsrevs(&pf->hw, &minsrevs); - if (status) { + err = ice_update_nvm_minsrevs(&pf->hw, &minsrevs); + if (err) { dev_warn(dev, "Failed to update minimum security revision data\n"); return -EIO; } + /* Wait for FW to finish Dumping the Shadow RAM */ + err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 3 * HZ, + &event); + if (err) { + dev_warn(dev, "Timed out waiting for firmware to dump Shadow RAM\n"); + return -ETIMEDOUT; + } + + completion_retval = le16_to_cpu(event.desc.retval); + if (completion_retval) { + dev_warn(dev, "Failed to dump Shadow RAM\n"); + return -EIO; + } + return 0; } @@ -498,7 +561,7 @@ ice_devlink_minsrev_validate(struct devlink *devlink, u32 id, union devlink_para struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_minsrev_info minsrevs = {}; - enum ice_status status; + int status; if (id != ICE_DEVLINK_PARAM_ID_FW_MGMT_MINSREV && id != ICE_DEVLINK_PARAM_ID_FW_UNDI_MINSREV) @@ -546,6 +609,154 @@ ice_devlink_minsrev_validate(struct devlink *devlink, u32 id, union devlink_para return 0; } +/** + * ice_get_tx_topo_user_sel - Read user's choice from flash + * @pf: pointer to pf structure + * @txbalance_ena: value read from flash will be saved here + * + * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV. + * + * Returns zero when read was successful, negative values otherwise. + */ +static int ice_get_tx_topo_user_sel(struct ice_pf *pf, bool *txbalance_ena) +{ + struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {}; + struct ice_hw *hw = &pf->hw; + int status; + + status = ice_acquire_nvm(hw, ICE_RES_READ); + if (status) + return status; + + status = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0, + sizeof(usr_sel), &usr_sel, true, true, NULL); + ice_release_nvm(hw); + + *txbalance_ena = usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL; + + return status; +} + +/** + * ice_update_tx_topo_user_sel - Save user's preference in flash + * @pf: pointer to pf structure + * @txbalance_ena: value to be saved in flash + * + * When txbalance_ena is set to true it means user's preference is to use + * five layer Tx Scheduler Topology Tree, when it is set to false then it is + * nine layer. 
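The stored preference is a single flag in the user-selection word read from the Tx topology TLV. A compact standalone sketch of the test and the read-modify-write (the bit position here is illustrative, not the real ICE_AQC_NVM_TX_TOPO_USER_SEL value):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TX_TOPO_USER_SEL	0x0001	/* illustrative bit position only */

/* bit set means the five layer tree was requested, clear means nine layer */
static bool topo_is_five_layer(uint16_t data)
{
	return data & TX_TOPO_USER_SEL;
}

/* read-modify-write of the word before it is written back to flash */
static uint16_t topo_update(uint16_t data, bool five_layer)
{
	if (five_layer)
		data |= TX_TOPO_USER_SEL;
	else
		data &= ~TX_TOPO_USER_SEL;
	return data;
}

int main(void)
{
	uint16_t word = 0;

	word = topo_update(word, true);
	printf("five layer: %d\n", topo_is_five_layer(word));	/* 1 */
	word = topo_update(word, false);
	printf("five layer: %d\n", topo_is_five_layer(word));	/* 0 */
	return 0;
}

Because the selection is only picked up the next time the driver initializes, the set handler below warns that a reboot is required.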
This choice should be stored in PFA TLV field and should be + * picked up by driver, next time during init. + * + * Returns zero when save was successful, negative values otherwise. + */ +static int ice_update_tx_topo_user_sel(struct ice_pf *pf, bool txbalance_ena) +{ + struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {}; + struct ice_hw *hw = &pf->hw; + int err; + + err = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (err) + return err; + + err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0, + sizeof(usr_sel), &usr_sel, true, true, NULL); + if (err) + goto exit_release_res; + + if (txbalance_ena) + usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL; + else + usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL; + + err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2, + sizeof(usr_sel.data), &usr_sel.data, + true, NULL, NULL); + if (err) + err = -EIO; + +exit_release_res: + ice_release_nvm(hw); + + return err; +} + +/** + * ice_devlink_txbalance_get - Get txbalance parameter + * @devlink: pointer to the devlink instance + * @id: the parameter ID to set + * @ctx: context to store the parameter value + * + * Returns zero on success and negative value on failure. + */ +static int ice_devlink_txbalance_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + int status; + + status = ice_get_tx_topo_user_sel(pf, &ctx->val.vbool); + if (status) { + dev_warn(dev, "Failed to read Tx Scheduler Tree - User Selection data from flash\n"); + return -EIO; + } + + return 0; +} + +/** + * ice_devlink_txbalance_set - Set txbalance parameter + * @devlink: pointer to the devlink instance + * @id: the parameter ID to set + * @ctx: context to get the parameter value + * + * Returns zero on success and negative value on failure. + */ +static int ice_devlink_txbalance_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + int status; + + status = ice_update_tx_topo_user_sel(pf, ctx->val.vbool); + if (status) + return -EIO; + + dev_warn(dev, "Transmit balancing setting has been changed on this device. You must reboot the system for the change to take effect"); + + return 0; +} + +/** + * ice_devlink_txbalance_validate - Validate passed txbalance parameter value + * @devlink: unused pointer to devlink instance + * @id: the parameter ID to validate + * @val: value to validate + * @extack: netlink extended ACK structure + * + * Supported values are: + * true - five layer, false - nine layer Tx Scheduler Topology Tree + * + * Returns zero when passed parameter value is supported. Negative value on + * error. + */ +static int ice_devlink_txbalance_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct ice_hw *hw = &pf->hw; + + if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) { + NL_SET_ERR_MSG_MOD(extack, "Error: Requested feature is not supported by the FW on this device. 
Update the FW and run this command again."); + return -EOPNOTSUPP; + } + + return 0; +} + /* devlink parameters for the ice driver */ static const struct devlink_param ice_devlink_params[] = { DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_FW_MGMT_MINSREV, @@ -562,81 +773,28 @@ static const struct devlink_param ice_devlink_params[] = { ice_devlink_minsrev_get, ice_devlink_minsrev_set, ice_devlink_minsrev_validate), + DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_BALANCE, + "txbalancing", + DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + ice_devlink_txbalance_get, + ice_devlink_txbalance_set, + ice_devlink_txbalance_validate), }; #endif /* HAVE_DEVLINK_PARAMS */ #ifdef HAVE_DEVLINK_FLASH_UPDATE -/** - * ice_devlink_flash_update - Update firmware stored in flash on the device - * @devlink: pointer to devlink associated with device to update - * @params: flash update parameters - * @extack: netlink extended ACK structure - * - * Perform a device flash update. The bulk of the update logic is contained - * within the ice_flash_pldm_image function. - * - * Returns: zero on success, or an error code on failure. - */ -static int -ice_devlink_flash_update(struct devlink *devlink, - struct devlink_flash_update_params *params, - struct netlink_ext_ack *extack) -{ - struct ice_pf *pf = devlink_priv(devlink); -#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW - struct device *dev = &pf->pdev->dev; -#endif - struct ice_hw *hw = &pf->hw; -#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW - const struct firmware *fw; -#endif - u8 preservation; - int err; - - if (!params->overwrite_mask) { - /* preserve all settings and identifiers */ - preservation = ICE_AQC_NVM_PRESERVE_ALL; - } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) { - /* overwrite settings, but preserve the vital device identifiers */ - preservation = ICE_AQC_NVM_PRESERVE_SELECTED; - } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS | - DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) { - /* overwrite both settings and identifiers, preserve nothing */ - preservation = ICE_AQC_NVM_NO_PRESERVATION; - } else { - NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported"); - return -EOPNOTSUPP; - } - - if (!hw->dev_caps.common_cap.nvm_unified_update) { - NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update"); - return -EOPNOTSUPP; - } - - err = ice_check_for_pending_update(pf, NULL, extack); - if (err) - return err; - - devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0); - -#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW - err = request_firmware(&fw, params->file_name, dev); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk"); - return err; - } - - err = ice_flash_pldm_image(pf, fw, preservation, extack); - - release_firmware(fw); - - return err; -#else - return ice_flash_pldm_image(pf, params->fw, preservation, extack); -#endif -} - #ifdef HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +/** + * ice_devlink_flash_update_notify_compat - Compatibility for begin/end notify + * @devlink: pointer to the devlink instance for this device + * @params: flash update parameters + * @extack: netlink extended ACK message structure + * + * Compatibility wrapper which handles calling + * devlink_flash_update_begin_notify and devlink_flash_update_end_notify when + * the kernel does not do this for us. 
+ */ static int ice_devlink_flash_update_notify_compat(struct devlink *devlink, struct devlink_flash_update_params *params, @@ -645,19 +803,32 @@ ice_devlink_flash_update_notify_compat(struct devlink *devlink, int err; devlink_flash_update_begin_notify(devlink); - err = ice_devlink_flash_update(devlink, params, extack); + err = ice_flash_pldm_image(devlink, params, extack); devlink_flash_update_end_notify(devlink); return err; } -#endif +#endif /* HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY */ #ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS +/** + * ice_devlink_flash_update_params_compat - Compatibility for params argument + * @devlink: pointer to the devlink instance for this device + * @file_name: the file name to request the firmware from + * @component: the flash component to update + * @extack: netlink extended ACK message structure + * + * Compatibility wrapper which handles creating the flash update parameters + * structure for kernels which do not have this structure defined yet. + */ static int ice_devlink_flash_update_params_compat(struct devlink *devlink, const char *file_name, const char *component, struct netlink_ext_ack *extack) { + struct ice_pf *pf = devlink_priv(devlink); struct devlink_flash_update_params params = {}; + struct device *dev = ice_pf_to_dev(pf); + int ret = 0; /* individual component update is not yet supported, and older kernels * did not check this for us. @@ -668,18 +839,129 @@ ice_devlink_flash_update_params_compat(struct devlink *devlink, const char *file params.file_name = file_name; #ifdef HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY - return ice_devlink_flash_update_notify_compat(devlink, ¶ms, extack); + ret = ice_devlink_flash_update_notify_compat(devlink, ¶ms, extack); + + if (ret) + dev_dbg(dev, "ice_devlink_flash_update_notify_compat() returned %d\n", + ret); #else - return ice_devlink_flash_update(devlink, ¶ms, extack); + ret = ice_flash_pldm_image(devlink, params, extack); + + if (ret) + dev_dbg(dev, "ice_flash_pldm_image() returned %d\n", ret); #endif + return ret; } #endif /* !HAVE_DEVLINK_FLASH_UPDATE_PARAMS */ #endif /* HAVE_DEVLINK_FLASH_UPDATE */ +#ifdef HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +/** + * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware + * @devlink: pointer to the devlink instance to reload + * @netns_change: if true, the network namespace is changing + * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE + * @limit: limits on what reload should do, such as not resetting + * @extack: netlink extended ACK structure + * + * Allow user to activate new Embedded Management Processor firmware by + * issuing device specific EMP reset. Called in response to + * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE. + * + * Note that teardown and rebuild of the driver state happens automatically as + * part of an interrupt and watchdog task. This is because all physical + * functions on the device must be able to reset when an EMP reset occurs from + * any source. 
+ */ +static int +ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + u8 pending; + int err; + + err = ice_get_pending_updates(pf, &pending, extack); + if (err) + return err; + + /* pending is a bitmask of which flash banks have a pending update, + * including the main NVM bank, the Option ROM bank, and the netlist + * bank. If any of these bits are set, then there is a pending update + * waiting to be activated. + */ + if (!pending) { + NL_SET_ERR_MSG_MOD(extack, "No pending firmware update"); + return -ECANCELED; + } + + if (pf->fw_emp_reset_disabled) { + NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed"); + return -ECANCELED; + } + + dev_dbg(dev, "Issuing device EMP reset to activate firmware\n"); + + err = ice_aq_nvm_update_empr(hw); + if (err) { + dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware"); + return err; + } + + return 0; +} + +/** + * ice_devlink_reload_empr_finish - Wait for EMP reset to finish + * @devlink: pointer to the devlink instance reloading + * @action: the action requested + * @limit: limits imposed by userspace, such as not resetting + * @actions_performed: on return, indicate what actions actually performed + * @extack: netlink extended ACK structure + * + * Wait for driver to finish rebuilding after EMP reset is completed. This + * includes time to wait for both the actual device reset as well as the time + * for the driver's rebuild to complete. + */ +static int +ice_devlink_reload_empr_finish(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + int err; + + *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); + + /* It can take a while for the device and driver to complete the reset + * and rebuild process. 
+ */ + err = ice_wait_for_reset(pf, 60 * HZ); + if (err) + NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute"); + + return err; +} +#endif /* HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT */ + static const struct devlink_ops ice_devlink_ops = { #ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK, #endif /* HAVE_DEVLINK_FLASH_UPDATE_PARAMS */ +#ifdef HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), + /* The ice driver currently does not support driver reinit */ + .reload_down = ice_devlink_reload_empr_start, + .reload_up = ice_devlink_reload_empr_finish, +#endif .eswitch_mode_get = ice_eswitch_mode_get, .eswitch_mode_set = ice_eswitch_mode_set, #ifdef HAVE_DEVLINK_INFO_GET @@ -691,7 +973,7 @@ static const struct devlink_ops ice_devlink_ops = { #elif defined(HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY) .flash_update = ice_devlink_flash_update_notify_compat, #else - .flash_update = ice_devlink_flash_update, + .flash_update = ice_flash_pldm_image, #endif #endif /* HAVE_DEVLINK_FLASH_UPDATE */ }; @@ -713,7 +995,7 @@ struct ice_pf *ice_allocate_pf(struct device *dev) { struct devlink *devlink; - devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf)); + devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev); if (!devlink) return NULL; @@ -734,28 +1016,26 @@ struct ice_pf *ice_allocate_pf(struct device *dev) * * Return: zero on success or an error code on failure. */ -int ice_devlink_register(struct ice_pf *pf) +void ice_devlink_register(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); - struct device *dev = ice_pf_to_dev(pf); - int err; - err = devlink_register(devlink, dev); - if (err) { - dev_err(dev, "devlink registration failed: %d\n", err); - return err; - } +#ifdef HAVE_DEVLINK_SET_FEATURES + devlink_set_features(devlink, DEVLINK_F_RELOAD); +#endif /* HAVE_DEVLINK_SET_FEATURES */ +#ifdef HAVE_DEVLINK_REGISTER_SETS_DEV + devlink_register(devlink, ice_pf_to_dev(pf)); +#else + devlink_register(devlink); +#endif -#ifdef HAVE_DEVLINK_PARAMS - err = devlink_params_register(devlink, ice_devlink_params, - ARRAY_SIZE(ice_devlink_params)); - if (err) { - dev_err(dev, "devlink params registration failed: %d\n", err); - return err; - } -#endif /* HAVE_DEVLINK_PARAMS */ - - return 0; +#ifdef HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#ifndef HAVE_DEVLINK_SET_FEATURES +#ifdef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE + devlink_reload_enable(devlink); +#endif /* HAVE_DEVLINK_RELOAD_ENABLE_DISABLE */ +#endif /* !HAVE_DEVLINK_SET_FEATURES */ +#endif /* HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT */ } /** @@ -768,33 +1048,85 @@ void ice_devlink_unregister(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); -#ifdef HAVE_DEVLINK_PARAMS - devlink_params_unregister(devlink, ice_devlink_params, - ARRAY_SIZE(ice_devlink_params)); -#endif /* HAVE_DEVLINK_PARAMS */ +#ifdef HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#ifndef HAVE_DEVLINK_SET_FEATURES +#ifdef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE + devlink_reload_disable(devlink); +#endif /* HAVE_DEVLINK_RELOAD_ENABLE_DISABLE */ +#endif /* !HAVE_DEVLINK_SET_FEATURES */ +#endif /* HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT */ + devlink_unregister(devlink); } /** - * ice_devlink_params_publish - Publish parameters to allow user access. 
- * @pf: the PF structure pointer + * ice_devlink_register_params - Register devlink parameters for this PF + * @pf: the PF structure to register + * + * Registers the parameters associated with this PF. */ -void ice_devlink_params_publish(struct ice_pf __maybe_unused *pf) +int ice_devlink_register_params(struct ice_pf *pf) { #ifdef HAVE_DEVLINK_PARAMS + struct devlink *devlink = priv_to_devlink(pf); + struct device *dev = ice_pf_to_dev(pf); + int err; + + err = devlink_params_register(devlink, ice_devlink_params, + ARRAY_SIZE(ice_devlink_params)); + if (err) { + ice_dev_err_errno(dev, err, + "devlink params registration failed"); + return err; + } + +#ifndef HAVE_DEVLINK_NOTIFY_REGISTER +#ifdef HAVE_DEVLINK_PARAMS_PUBLISH devlink_params_publish(priv_to_devlink(pf)); -#endif +#endif /* HAVE_DEVLINK_PARAMS_PUBLISH */ +#endif /* !HAVE_DEVLINK_NOTIFY_REGISTER */ + +#endif /* HAVE_DEVLINK_PARAMS */ + return 0; } /** - * ice_devlink_params_unpublish - Unpublish parameters to prevent user access. - * @pf: the PF structure pointer + * ice_devlink_unregister_params - Unregister devlink parameters for this PF + * @pf: the PF structure to cleanup + * + * Removes the main devlink parameters associated with this PF. */ -void ice_devlink_params_unpublish(struct ice_pf __maybe_unused *pf) +void ice_devlink_unregister_params(struct ice_pf *pf) { #ifdef HAVE_DEVLINK_PARAMS + struct devlink *devlink = priv_to_devlink(pf); + +#ifndef HAVE_DEVLINK_NOTIFY_REGISTER +#ifdef HAVE_DEVLINK_PARAMS_PUBLISH devlink_params_unpublish(priv_to_devlink(pf)); -#endif +#endif /* HAVE_DEVLINK_PARAMS_PUBLISH */ +#endif /* !HAVE_DEVLINK_NOTIFY_REGISTER */ + + devlink_params_unregister(devlink, ice_devlink_params, + ARRAY_SIZE(ice_devlink_params)); +#endif /* HAVE_DEVLINK_PARAMS */ +} + +/** + * ice_devlink_set_switch_id - Set unical switch id based on pci dsn + * @pf: the PF to create a devlink port for + * @ppid: struct with switch id information + */ +static void +ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid) +{ + struct pci_dev *pdev = pf->pdev; + u64 id; + + id = pci_get_dsn(pdev); + + ppid->id_len = sizeof(id); + put_unaligned_be64(id, &ppid->id); } /** @@ -823,15 +1155,18 @@ int ice_devlink_create_pf_port(struct ice_pf *pf) return -EIO; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = vsi->port_info->lport; + attrs.phys.port_number = pf->hw.bus.func; + + ice_devlink_set_switch_id(pf, &attrs.switch_id); devlink_port_attrs_set(devlink_port, &attrs); devlink = priv_to_devlink(pf); err = devlink_port_register(devlink, devlink_port, vsi->idx); if (err) { - dev_err(dev, "Failed to create devlink port for PF %d, error %d\n", - pf->hw.pf_id, err); + ice_dev_err_errno(dev, err, + "Failed to create devlink port for PF %d", + pf->hw.pf_id); return err; } @@ -875,20 +1210,26 @@ int ice_devlink_create_vf_port(struct ice_vf *vf) pf = vf->pf; dev = ice_pf_to_dev(pf); - vsi = ice_get_vf_vsi(vf); devlink_port = &vf->devlink_port; + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; attrs.pci_vf.pf = pf->hw.bus.func; attrs.pci_vf.vf = vf->vf_id; + ice_devlink_set_switch_id(pf, &attrs.switch_id); + devlink_port_attrs_set(devlink_port, &attrs); devlink = priv_to_devlink(pf); err = devlink_port_register(devlink, devlink_port, vsi->idx); if (err) { - dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", - vf->vf_id, err); + ice_dev_err_errno(dev, err, + "Failed to create devlink port for VF %d", + vf->vf_id); return err; 
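The switch id used for the devlink ports is simply the 64-bit PCI DSN stored most-significant byte first in the port attributes. A standalone sketch of that packing (hypothetical helper standing in for put_unaligned_be64()):

#include <stdint.h>
#include <stdio.h>

/* store a 64-bit DSN big-endian into the 8-byte id buffer */
static void pack_dsn_be64(uint64_t dsn, uint8_t id[8])
{
	for (int i = 0; i < 8; i++)
		id[i] = (uint8_t)(dsn >> (56 - 8 * i));
}

int main(void)
{
	uint8_t id[8];

	pack_dsn_be64(0x0123456789abcdefULL, id);	/* example DSN */
	for (int i = 0; i < 8; i++)
		printf("%02x", id[i]);			/* 0123456789abcdef */
	printf("\n");
	return 0;
}

Both the PF and VF ports below set the same switch id, so userspace can group them as ports of one switch.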
} @@ -914,18 +1255,25 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf) #ifdef HAVE_DEVLINK_REGIONS #ifdef HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024) + #ifdef HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS + /** - * ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents + * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents * @devlink: the devlink instance * @ops: the devlink region being snapshotted * @extack: extended ACK response structure * @data: on exit points to snapshot data buffer * * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for - * the shadow-ram devlink region. It captures a snapshot of the shadow ram - * contents. This snapshot can later be viewed via the devlink-region - * interface. + * the nvm-flash devlink region. It captures a snapshot of the full NVM flash + * contents, including both banks of flash. This snapshot can later be viewed + * via the devlink-region interface. + * + * It captures the flash using the FLASH_ONLY bit set when reading via + * firmware, so it does not read the current Shadow RAM contents. For that, + * use the shadow-ram region. * * @returns zero on success, and updates the data pointer. Returns a non-zero * error code on failure. @@ -941,37 +1289,120 @@ ice_devlink_nvm_snapshot(struct devlink *devlink, struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; - u8 *nvm_data; - u32 nvm_size; + u8 *nvm_data, *tmp, i; + u32 nvm_size, left; + s8 num_blks; nvm_size = hw->flash.flash_size; nvm_data = vzalloc(nvm_size); if (!nvm_data) return -ENOMEM; - status = ice_acquire_nvm(hw, ICE_RES_READ); - if (status) { - dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", - status, hw->adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); - vfree(nvm_data); - return -EIO; + num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE); + tmp = nvm_data; + left = nvm_size; + + /* some systems take longer to read the nvm than others which causes the + * fw to reclaim the nvm lock before the entire nvm has been read. fix + * this by breaking the reads of the nvm into smaller chunks that will + * probably not take as long. 
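The chunking arithmetic is the part worth seeing in isolation. A standalone sketch (flash access stubbed out with a hypothetical helper) of how the offset, remaining length and per-read size evolve for the 1 MB block size chosen above:

#include <stdint.h>
#include <stdio.h>

#define READ_BLK_SIZE	(1024u * 1024u)	/* same 1 MB granularity as above */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* stand-in for one acquire-lock / read-flat-NVM / release-lock cycle */
static int read_chunk(uint32_t offset, uint32_t len)
{
	printf("read %7u bytes at offset 0x%08x\n", (unsigned)len,
	       (unsigned)offset);
	return 0;
}

int main(void)
{
	uint32_t flash_size = 5u * 1024u * 1024u + 123u;	/* example size */
	uint32_t num_blks = DIV_ROUND_UP(flash_size, READ_BLK_SIZE);
	uint32_t left = flash_size;

	for (uint32_t i = 0; i < num_blks; i++) {
		uint32_t read_sz = MIN(READ_BLK_SIZE, left);

		if (read_chunk(i * READ_BLK_SIZE, read_sz))
			return 1;
		left -= read_sz;
	}
	return 0;
}

Each iteration holds the NVM semaphore only for one short read, so the firmware has no reason to reclaim the lock part way through the snapshot.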
this has some overhead since we are + * increasing the number of AQ commands, but it should always work + */ + for (i = 0; i < num_blks; i++) { + u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left); + int status; + + status = ice_acquire_nvm(hw, ICE_RES_READ); + if (status) { + dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", + status, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); + vfree(nvm_data); + return -EIO; + } + + status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE, + &read_sz, tmp, false); + if (status) { + dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", + read_sz, status, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); + ice_release_nvm(hw); + vfree(nvm_data); + return -EIO; + } + + ice_release_nvm(hw); + + tmp += read_sz; + left -= read_sz; } - status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false); - if (status) { + *data = nvm_data; + + return 0; +} + +#ifdef HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +/** + * ice_devlink_sram_snapshot - Capture a snapshot of the Shadow RAM contents + * @devlink: the devlink instance + * @ops: the devlink region being snapshotted + * @extack: extended ACK response structure + * @data: on exit points to snapshot data buffer + * + * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for + * the shadow-ram devlink region. It captures a snapshot of the shadow ram + * contents. This snapshot can later be viewed via the devlink-region + * interface. + * + * @returns zero on success, and updates the data pointer. Returns a non-zero + * error code on failure. + */ +#endif /* HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS */ +static int +ice_devlink_sram_snapshot(struct devlink *devlink, +#ifdef HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS + const struct devlink_region_ops __always_unused *ops, +#endif /* HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS */ + struct netlink_ext_ack *extack, u8 **data) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + u8 *sram_data; + u32 sram_size; + int err; + + sram_size = hw->flash.sr_words * 2u; + sram_data = vzalloc(sram_size); + if (!sram_data) + return -ENOMEM; + + err = ice_acquire_nvm(hw, ICE_RES_READ); + if (err) { + dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", + err, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); + vfree(sram_data); + return err; + } + + /* Read from the Shadow RAM, rather than directly from NVM */ + err = ice_read_flat_nvm(hw, 0, &sram_size, sram_data, true); + if (err) { dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", - nvm_size, status, hw->adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); + sram_size, err, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, + "Failed to read Shadow RAM contents"); ice_release_nvm(hw); - vfree(nvm_data); - return -EIO; + vfree(sram_data); + return err; } ice_release_nvm(hw); - *data = nvm_data; + *data = sram_data; return 0; } @@ -1002,8 +1433,8 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink, struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; void *devcaps; + int status; devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN); if (!devcaps) @@ -1016,7 +1447,7 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink, status, 
hw->adminq.sq_last_status); NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities"); vfree(devcaps); - return -EIO; + return status; } *data = (u8 *)devcaps; @@ -1033,6 +1464,14 @@ static const struct devlink_region_ops ice_nvm_region_ops = { #endif }; +static const struct devlink_region_ops ice_sram_region_ops = { + .name = "shadow-ram", + .destructor = vfree, +#ifdef HAVE_DEVLINK_REGION_OPS_SNAPSHOT + .snapshot = ice_devlink_sram_snapshot, +#endif +}; + static const struct devlink_region_ops ice_devcaps_region_ops = { .name = "device-caps", .destructor = vfree, @@ -1052,23 +1491,32 @@ void ice_devlink_init_regions(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); struct device *dev = ice_pf_to_dev(pf); - u64 nvm_size; + u64 nvm_size, sram_size; nvm_size = pf->hw.flash.flash_size; pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1, nvm_size); if (IS_ERR(pf->nvm_region)) { - dev_err(dev, "failed to create NVM devlink region, err %ld\n", - PTR_ERR(pf->nvm_region)); + ice_dev_err_errno(dev, PTR_ERR(pf->nvm_region), + "failed to create NVM devlink region"); pf->nvm_region = NULL; } + sram_size = pf->hw.flash.sr_words * 2u; + pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops, + 1, sram_size); + if (IS_ERR(pf->sram_region)) { + dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n", + PTR_ERR(pf->sram_region)); + pf->sram_region = NULL; + } + pf->devcaps_region = devlink_region_create(devlink, &ice_devcaps_region_ops, 10, ICE_AQ_MAX_BUF_LEN); if (IS_ERR(pf->devcaps_region)) { - dev_err(dev, "failed to create device-caps devlink region, err %ld\n", - PTR_ERR(pf->devcaps_region)); + ice_dev_err_errno(dev, PTR_ERR(pf->devcaps_region), + "failed to create device-caps devlink region"); pf->devcaps_region = NULL; } } @@ -1084,7 +1532,706 @@ void ice_devlink_destroy_regions(struct ice_pf *pf) if (pf->nvm_region) devlink_region_destroy(pf->nvm_region); + if (pf->sram_region) + devlink_region_destroy(pf->sram_region); + if (pf->devcaps_region) devlink_region_destroy(pf->devcaps_region); } #endif /* HAVE_DEVLINK_REGIONS */ + +#ifdef HAVE_DEVLINK_HEALTH + +#define ICE_MDD_SRC_TO_STR(_src) \ + ((_src) == ICE_MDD_SRC_NONE ? "none" \ + : (_src) == ICE_MDD_SRC_TX_PQM ? "tx_pqm" \ + : (_src) == ICE_MDD_SRC_TX_TCLAN ? "tx_tclan" \ + : (_src) == ICE_MDD_SRC_TX_TDPU ? "tx_tdpu" \ + : (_src) == ICE_MDD_SRC_RX ? 
"rx" \ + : "invalid") + +static int +#ifndef HAVE_DEVLINK_HEALTH_OPS_EXTACK +ice_mdd_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx) +#else +ice_mdd_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack __always_unused *extack) +#endif /* HAVE_DEVLINK_HEALTH_OPS_EXTACK */ +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + struct ice_mdd_reporter *mdd_reporter = &pf->mdd_reporter; + struct ice_mdd_event *mdd_event; + int err; + + err = devlink_fmsg_u32_pair_put(fmsg, "count", + mdd_reporter->count); + if (err) + return err; + + list_for_each_entry(mdd_event, &mdd_reporter->event_list, list) { + char *src; + + err = devlink_fmsg_obj_nest_start(fmsg); + if (err) + return err; + + src = ICE_MDD_SRC_TO_STR(mdd_event->src); + + err = devlink_fmsg_string_pair_put(fmsg, "src", src); + if (err) + return err; + + err = devlink_fmsg_u8_pair_put(fmsg, "pf_num", + mdd_event->pf_num); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "mdd_vf_num", + mdd_event->vf_num); + if (err) + return err; + + err = devlink_fmsg_u8_pair_put(fmsg, "mdd_event", + mdd_event->event); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "mdd_queue", + mdd_event->queue); + if (err) + return err; + + err = devlink_fmsg_obj_nest_end(fmsg); + if (err) + return err; + } + + return 0; +} + +static const struct devlink_health_reporter_ops ice_mdd_reporter_ops = { + .name = "mdd", + .dump = ice_mdd_reporter_dump, +}; + +/** + * ice_devlink_init_mdd_reporter - Initialize MDD devlink health reporter + * @pf: the PF device structure + * + * Create devlink health reporter used to handle MDD events. + */ +void ice_devlink_init_mdd_reporter(struct ice_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct device *dev = ice_pf_to_dev(pf); + + INIT_LIST_HEAD(&pf->mdd_reporter.event_list); + + pf->mdd_reporter.reporter = + devlink_health_reporter_create(devlink, + &ice_mdd_reporter_ops, + 0, /* graceful period */ +#ifndef HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER + false, /* auto recover */ +#endif /* HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER */ + pf); /* private data */ + + if (IS_ERR(pf->mdd_reporter.reporter)) { + ice_dev_err_errno(dev, PTR_ERR(pf->mdd_reporter.reporter), + "failed to create devlink MDD health reporter"); + } +} + +/** + * ice_devlink_destroy_mdd_reporter - Destroy MDD devlink health reporter + * @pf: the PF device structure + * + * Remove previously created MDD health reporter for this PF. + */ +void ice_devlink_destroy_mdd_reporter(struct ice_pf *pf) +{ + if (pf->mdd_reporter.reporter) + devlink_health_reporter_destroy(pf->mdd_reporter.reporter); +} + +/** + * ice_devlink_report_mdd_event - Report an MDD event through devlink health + * @pf: the PF device structure + * @src: the HW block that was the source of this MDD event + * @pf_num: the pf_num on which the MDD event occurred + * @vf_num: the vf_num on which the MDD event occurred + * @event: the event type of the MDD event + * @queue: the queue on which the MDD event occurred + * + * Report an MDD event that has occurred on this PF. 
+ */ +void +ice_devlink_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, + u8 pf_num, u16 vf_num, u8 event, u16 queue) +{ + struct ice_mdd_reporter *mdd_reporter = &pf->mdd_reporter; + struct ice_mdd_event *mdd_event; + int err; + + if (!mdd_reporter->reporter) + return; + + mdd_reporter->count++; + + mdd_event = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*mdd_event), + GFP_KERNEL); + if (!mdd_event) + return; + + mdd_event->src = src; + mdd_event->pf_num = pf_num; + mdd_event->vf_num = vf_num; + mdd_event->event = event; + mdd_event->queue = queue; + + list_add_tail(&mdd_event->list, &mdd_reporter->event_list); + + err = devlink_health_report(mdd_reporter->reporter, + "Malicious Driver Detection event\n", + pf); + if (err) + dev_err(ice_pf_to_dev(pf), + "failed to report MDD via devlink health\n"); +} + +/** + * ice_devlink_clear_after_reset - clear devlink health issues after a reset + * @pf: the PF device structure + * + * Mark the PF in healthy state again after a reset has completed. + */ +void ice_devlink_clear_after_reset(struct ice_pf *pf) +{ + struct ice_mdd_reporter *mdd_reporter = &pf->mdd_reporter; + enum devlink_health_reporter_state new_state = + DEVLINK_HEALTH_REPORTER_STATE_HEALTHY; + struct ice_mdd_event *mdd_event, *tmp; + + if (!mdd_reporter->reporter) + return; + + devlink_health_reporter_state_update(mdd_reporter->reporter, + new_state); + pf->mdd_reporter.count = 0; + + list_for_each_entry_safe(mdd_event, tmp, &mdd_reporter->event_list, + list) { + list_del(&mdd_event->list); + } +} + +#endif /* HAVE_DEVLINK_HEALTH */ + +#ifdef HAVE_DEVLINK_PARAMS +#define ICE_DEVLINK_PARAM_ID_TC1_INLINE_FD 101 +#define ICE_DEVLINK_PARAM_ID_TC2_INLINE_FD 102 +#define ICE_DEVLINK_PARAM_ID_TC3_INLINE_FD 103 +#define ICE_DEVLINK_PARAM_ID_TC4_INLINE_FD 104 +#define ICE_DEVLINK_PARAM_ID_TC5_INLINE_FD 105 +#define ICE_DEVLINK_PARAM_ID_TC6_INLINE_FD 106 +#define ICE_DEVLINK_PARAM_ID_TC7_INLINE_FD 107 +#define ICE_DEVLINK_PARAM_ID_TC8_INLINE_FD 108 +#define ICE_DEVLINK_PARAM_ID_TC9_INLINE_FD 109 +#define ICE_DEVLINK_PARAM_ID_TC10_INLINE_FD 110 +#define ICE_DEVLINK_PARAM_ID_TC11_INLINE_FD 111 +#define ICE_DEVLINK_PARAM_ID_TC12_INLINE_FD 112 +#define ICE_DEVLINK_PARAM_ID_TC13_INLINE_FD 113 +#define ICE_DEVLINK_PARAM_ID_TC14_INLINE_FD 114 +#define ICE_DEVLINK_PARAM_ID_TC15_INLINE_FD 115 + +#define ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(num) (120 + (num)) +#define ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(num) (140 + (num)) + +/** + * ice_validate_tc_params_id - Validate devlink tc param id + * @id: the parameter ID to validate + * @start_id: start param id + * @num_params: number of valid params + * + * Returns: zero on success, or an error code on failure. + */ +static int +ice_validate_tc_params_id(u32 id, u32 start_id, u8 num_params) +{ + if (id < start_id || id >= start_id + num_params) + return -EINVAL; + + return 0; +} + +/** + * ice_get_tc_param_ch_vsi - Return channel vsi associated with + * tc param id + * @pf: pointer to PF instance + * @id: the parameter ID to validate + * @start_id: start param id + * + * Returns: ch_vsi on success, or NULL on failure. 
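The per-TC parameters are addressed purely by ID arithmetic: tcN maps to a fixed base plus N, and the channel VSI is then looked up from that offset. A standalone sketch of the mapping and bounds check (simplified, without the VSI lookup itself):

#include <stdio.h>

#define TC_QPS_PER_POLLER_ID(n)	(120 + (n))	/* same scheme as above */

/* reject IDs outside [start_id, start_id + num_params) */
static int validate_tc_param_id(unsigned int id, unsigned int start_id,
				unsigned int num_params)
{
	return (id >= start_id && id < start_id + num_params) ? 0 : -1;
}

int main(void)
{
	unsigned int num_tc_params = 3;	/* e.g. all_numtc - 1 */
	unsigned int id = TC_QPS_PER_POLLER_ID(2);	/* "tc2_qps_per_poller" */

	if (validate_tc_param_id(id, TC_QPS_PER_POLLER_ID(1), num_tc_params))
		return 1;

	/* tcN lands in index N, matching the tc_map_vsi[id - start_id + 1]
	 * lookup used below */
	unsigned int tc_slot = id - TC_QPS_PER_POLLER_ID(1) + 1;
	printf("param id %u -> tc_map_vsi[%u]\n", id, tc_slot);	/* slot 2 */
	return 0;
}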
+ */ +static struct ice_vsi * +ice_get_tc_param_ch_vsi(struct ice_pf *pf, u32 id, u32 start_id) +{ + struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct ice_vsi *ch_vsi; + + if (ice_validate_tc_params_id(id, start_id, vsi->num_tc_devlink_params)) + return NULL; + + ch_vsi = vsi->tc_map_vsi[id - start_id + 1]; + if (!ch_vsi || !ch_vsi->ch) + return NULL; + + return ch_vsi; +} + +/** + * ice_devlink_tc_inline_fd_get - Get poller timeout value + * @devlink: pointer to the devlink instance + * @id: the parameter ID to get + * @ctx: context to return the parameter value + * + * Returns: zero on success, or an error code on failure. + */ +static int +ice_devlink_tc_inline_fd_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct ice_vsi *vsi = pf->vsi[0]; + struct ice_vsi *ch_vsi; + int err = 0; + + err = ice_validate_tc_params_id(id, ICE_DEVLINK_PARAM_ID_TC1_INLINE_FD, + vsi->num_tc_devlink_params); + if (err) + return err; + + ch_vsi = vsi->tc_map_vsi[id - ICE_DEVLINK_PARAM_ID_TC1_INLINE_FD + 1]; + if (!ch_vsi || !ch_vsi->ch) + return -EINVAL; + + ctx->val.vbool = ch_vsi->ch->inline_fd; + + return 0; +} + +/** + * ice_devlink_tc_inline_fd_validate - Validate inline_fd setting + * @devlink: pointer to the devlink instance + * @id: the parameter ID to validate + * @val: value to be validated + * @extack: netlink extended ACK structure + * + * Validate inline fd + * Returns: zero on success, or an error code on failure and extack with a + * reason for failure. + */ +static int +ice_devlink_tc_inline_fd_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct ice_vsi *vsi = pf->vsi[0]; + struct ice_vsi *ch_vsi; + int err = 0; + + err = ice_validate_tc_params_id(id, ICE_DEVLINK_PARAM_ID_TC1_INLINE_FD, + vsi->num_tc_devlink_params); + if (err) + return err; + + ch_vsi = vsi->tc_map_vsi[id - ICE_DEVLINK_PARAM_ID_TC1_INLINE_FD + 1]; + if (!ch_vsi || !ch_vsi->ch) + return -EINVAL; + + return 0; +} + +/** + * ice_devlink_tc_inline_fd_set - Enable/Disable inline flow director + * @devlink: pointer to the devlink instance + * @id: the parameter ID to set + * @ctx: context to return the parameter value + * + * Returns: zero on success, or an error code on failure. + */ +static int +ice_devlink_tc_inline_fd_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct ice_vsi *vsi = pf->vsi[0]; + struct ice_vsi *ch_vsi; + + ch_vsi = vsi->tc_map_vsi[id - ICE_DEVLINK_PARAM_ID_TC1_INLINE_FD + 1]; + ch_vsi->ch->inline_fd = ctx->val.vbool; + + return 0; +} + +/** + * ice_devlink_tc_qps_per_poller_get - Get the current number of qps per + * poller for a tc. + * @devlink: pointer to the devlink instance + * @id: the parameter ID to get + * @ctx: context to return the parameter value + * + * Returns: zero on success, or an error code on failure. + */ +static int +ice_devlink_tc_qps_per_poller_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct ice_vsi *ch_vsi; + + ch_vsi = ice_get_tc_param_ch_vsi(pf, id, + ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(1)); + if (!ch_vsi) + return -EINVAL; + + ctx->val.vu8 = ch_vsi->ch->qps_per_poller; + + return 0; +} + +/** + * ice_devlink_tc_qps_per_poller_validate - Validate the number of qps + * per poller. 
+ * @devlink: pointer to the devlink instance + * @id: the parameter ID to validate + * @val: value to be validated + * @extack: netlink extended ACK structure + * + * Check that the value passed is less than the max queues in the TC + * Returns: zero on success, or an error code on failure and extack with a + * reason for failure. + */ +static int +ice_devlink_tc_qps_per_poller_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct ice_vsi *ch_vsi; + + ch_vsi = ice_get_tc_param_ch_vsi(pf, id, + ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(1)); + if (!ch_vsi) + return -EINVAL; + + if (val.vu8 > ch_vsi->ch->num_rxq) { + NL_SET_ERR_MSG_MOD(extack, + "Value cannot be greater than number of queues in TC"); + return -EINVAL; + } + + if (ice_is_xdp_ena_vsi(ice_get_main_vsi(pf))) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot change qps_per_poller when xdp is enabled"); + return -EINVAL; + } + + return 0; +} + +/** + * ice_devlink_tc_qps_per_poller_set - Set the number of qps per poller + * @devlink: pointer to the devlink instance + * @id: the parameter ID to set + * @ctx: context to return the parameter value + * + * Returns: zero on success, or an error code on failure. + */ +static int +ice_devlink_tc_qps_per_poller_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct ice_vsi *ch_vsi; + + ch_vsi = ice_get_tc_param_ch_vsi(pf, id, + ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(1)); + if (!ch_vsi) + return -EINVAL; + + ch_vsi->ch->qps_per_poller = ctx->val.vu8; + + ice_ch_vsi_update_ring_vecs(ice_get_main_vsi(pf)); + return 0; +} + +/** + * ice_devlink_tc_poller_timeout_get - Get poller timeout value + * @devlink: pointer to the devlink instance + * @id: the parameter ID to get + * @ctx: context to return the parameter value + * + * Returns: zero on success, or an error code on failure. + */ +static int +ice_devlink_tc_poller_timeout_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct ice_vsi *ch_vsi; + + ch_vsi = ice_get_tc_param_ch_vsi(pf, id, + ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(1)); + if (!ch_vsi) + return -EINVAL; + + ctx->val.vu32 = ch_vsi->ch->poller_timeout; + + return 0; +} + +#define MAX_POLLER_TIMEOUT 10000 + +/** + * ice_devlink_tc_poller_timeout_validate - Validate the poller timeout + * @devlink: pointer to the devlink instance + * @id: the parameter ID to validate + * @val: value to be validated + * @extack: netlink extended ACK structure + * + * Validate poller timeout value + * Returns: zero on success, or an error code on failure and extack with a + * reason for failure. + */ +static int +ice_devlink_tc_poller_timeout_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct ice_vsi *ch_vsi; + + ch_vsi = ice_get_tc_param_ch_vsi(pf, id, + ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(1)); + if (!ch_vsi) + return -EINVAL; + + if (val.vu32 > MAX_POLLER_TIMEOUT) { + NL_SET_ERR_MSG_MOD(extack, + "Value cannot be greater than 10000 jiffies"); + return -EINVAL; + } + + return 0; +} + +/** + * ice_devlink_tc_poller_timeout_set - Set the poller timeout + * @devlink: pointer to the devlink instance + * @id: the parameter ID to set + * @ctx: context to return the parameter value + * + * Returns: zero on success, or an error code on failure. 
+ */ +static int +ice_devlink_tc_poller_timeout_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct ice_vsi *ch_vsi; + + ch_vsi = ice_get_tc_param_ch_vsi(pf, id, + ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(1)); + if (!ch_vsi) + return -EINVAL; + + ch_vsi->ch->poller_timeout = ctx->val.vu32; + + return 0; +} + +#define ICE_DEVLINK_TC_INLINE_FD_PARAM(_id, _name) \ + DEVLINK_PARAM_DRIVER(_id, _name, DEVLINK_PARAM_TYPE_BOOL, \ + BIT(DEVLINK_PARAM_CMODE_RUNTIME), \ + ice_devlink_tc_inline_fd_get, \ + ice_devlink_tc_inline_fd_set, \ + ice_devlink_tc_inline_fd_validate) \ + +#define ICE_DL_TC_QPS_PER_POLLER_PARAM(_id, _name) \ + DEVLINK_PARAM_DRIVER(_id, _name, DEVLINK_PARAM_TYPE_U8, \ + BIT(DEVLINK_PARAM_CMODE_RUNTIME), \ + ice_devlink_tc_qps_per_poller_get, \ + ice_devlink_tc_qps_per_poller_set, \ + ice_devlink_tc_qps_per_poller_validate) \ + +#define ICE_DL_TC_POLLER_TIMEOUT_PARAM(_id, _name) \ + DEVLINK_PARAM_DRIVER(_id, _name, DEVLINK_PARAM_TYPE_U32, \ + BIT(DEVLINK_PARAM_CMODE_RUNTIME), \ + ice_devlink_tc_poller_timeout_get, \ + ice_devlink_tc_poller_timeout_set, \ + ice_devlink_tc_poller_timeout_validate) \ + +static const struct devlink_param ice_devlink_inline_fd_params[] = { + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC1_INLINE_FD, + "tc1_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC2_INLINE_FD, + "tc2_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC3_INLINE_FD, + "tc3_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC4_INLINE_FD, + "tc4_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC5_INLINE_FD, + "tc5_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC6_INLINE_FD, + "tc6_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC7_INLINE_FD, + "tc7_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC8_INLINE_FD, + "tc8_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC9_INLINE_FD, + "tc9_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC10_INLINE_FD, + "tc10_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC11_INLINE_FD, + "tc11_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC12_INLINE_FD, + "tc12_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC13_INLINE_FD, + "tc13_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC14_INLINE_FD, + "tc14_inline_fd"), + ICE_DEVLINK_TC_INLINE_FD_PARAM(ICE_DEVLINK_PARAM_ID_TC15_INLINE_FD, + "tc15_inline_fd"), +}; + +static const struct devlink_param ice_devlink_qps_per_poller_params[] = { + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(1), + "tc1_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(2), + "tc2_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(3), + "tc3_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(4), + "tc4_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(5), + "tc5_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(6), + "tc6_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(7), + "tc7_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(8), + "tc8_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(9), + 
"tc9_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(10), + "tc10_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(11), + "tc11_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(12), + "tc12_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(13), + "tc13_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(14), + "tc14_qps_per_poller"), + ICE_DL_TC_QPS_PER_POLLER_PARAM(ICE_DL_PARAM_ID_TC_QPS_PER_POLLER(15), + "tc15_qps_per_poller"), +}; + +static const struct devlink_param ice_devlink_poller_timeout_params[] = { + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(1), + "tc1_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(2), + "tc2_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(3), + "tc3_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(4), + "tc4_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(5), + "tc5_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(6), + "tc6_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(7), + "tc7_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(8), + "tc8_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(9), + "tc9_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(10), + "tc10_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(11), + "tc11_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(12), + "tc12_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(13), + "tc13_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(14), + "tc14_poller_timeout"), + ICE_DL_TC_POLLER_TIMEOUT_PARAM(ICE_DL_PARAM_ID_TC_POLLER_TIMEOUT(15), + "tc15_poller_timeout"), +}; + +int ice_devlink_tc_params_register(struct ice_vsi *vsi) +{ + struct devlink *devlink = priv_to_devlink(vsi->back); + struct device *dev = ice_pf_to_dev(vsi->back); + int err = 0; + + if (vsi->all_numtc > 1) { + vsi->num_tc_devlink_params = vsi->all_numtc - 1; + err = devlink_params_register(devlink, + ice_devlink_inline_fd_params, + vsi->num_tc_devlink_params); + if (err) { + ice_dev_err_errno(dev, err, + "devlink inline_fd params registration failed"); + return err; + } + + err = devlink_params_register(devlink, + ice_devlink_qps_per_poller_params, + vsi->num_tc_devlink_params); + if (err) { + ice_dev_err_errno(dev, err, + "devlink qps_per_poller params registration failed"); + return err; + } + + err = devlink_params_register(devlink, + ice_devlink_poller_timeout_params, + vsi->num_tc_devlink_params); + if (err) { + ice_dev_err_errno(dev, err, + "devlink poller_timeout params registration failed"); + return err; + } +#ifndef HAVE_DEVLINK_NOTIFY_REGISTER +#ifdef HAVE_DEVLINK_PARAMS_PUBLISH + devlink_params_publish(devlink); +#endif /* HAVE_DEVLINK_PARAMS_PUBLISH */ +#endif /* !HAVE_DEVLINK_NOTIFY_REGISTER */ + } + + return err; +} + +void ice_devlink_tc_params_unregister(struct ice_vsi *vsi) +{ + struct devlink *devlink = priv_to_devlink(vsi->back); + + if (vsi->num_tc_devlink_params) { + devlink_params_unregister(devlink, ice_devlink_inline_fd_params, + 
vsi->num_tc_devlink_params); + devlink_params_unregister(devlink, + ice_devlink_qps_per_poller_params, + vsi->num_tc_devlink_params); + devlink_params_unregister(devlink, + ice_devlink_poller_timeout_params, + vsi->num_tc_devlink_params); + vsi->num_tc_devlink_params = 0; + } +} +#endif /* HAVE_DEVLINK_PARAMS */ diff --git a/drivers/thirdparty/ice/ice_devlink.h b/drivers/thirdparty/ice/ice_devlink.h index 33464adf4ee6..dd9016e738f2 100644 --- a/drivers/thirdparty/ice/ice_devlink.h +++ b/drivers/thirdparty/ice/ice_devlink.h @@ -7,10 +7,10 @@ #if IS_ENABLED(CONFIG_NET_DEVLINK) struct ice_pf *ice_allocate_pf(struct device *dev); -int ice_devlink_register(struct ice_pf *pf); +void ice_devlink_register(struct ice_pf *pf); void ice_devlink_unregister(struct ice_pf *pf); -void ice_devlink_params_publish(struct ice_pf *pf); -void ice_devlink_params_unpublish(struct ice_pf *pf); +int ice_devlink_register_params(struct ice_pf *pf); +void ice_devlink_unregister_params(struct ice_pf *pf); int ice_devlink_create_pf_port(struct ice_pf *pf); void ice_devlink_destroy_pf_port(struct ice_pf *pf); #ifdef HAVE_DEVLINK_PORT_ATTR_PCI_VF @@ -23,10 +23,10 @@ static inline struct ice_pf *ice_allocate_pf(struct device *dev) return devm_kzalloc(dev, sizeof(struct ice_pf), GFP_KERNEL); } -static inline int ice_devlink_register(struct ice_pf *pf) { return 0; } +static inline void ice_devlink_register(struct ice_pf *pf) { } static inline void ice_devlink_unregister(struct ice_pf *pf) { } -static inline void ice_devlink_params_publish(struct ice_pf *pf) { } -static inline void ice_devlink_params_unpublish(struct ice_pf *pf) { } +static inline int ice_devlink_register_params(struct ice_pf *pf) { return 0; } +static inline void ice_devlink_unregister_params(struct ice_pf *pf) { } static inline int ice_devlink_create_pf_port(struct ice_pf *pf) { return 0; } static inline void ice_devlink_destroy_pf_port(struct ice_pf *pf) { } #ifdef HAVE_DEVLINK_PORT_ATTR_PCI_VF @@ -43,4 +43,15 @@ static inline void ice_devlink_init_regions(struct ice_pf *pf) { } static inline void ice_devlink_destroy_regions(struct ice_pf *pf) { } #endif +int ice_devlink_tc_params_register(struct ice_vsi *vsi); +void ice_devlink_tc_params_unregister(struct ice_vsi *vsi); + +#ifdef HAVE_DEVLINK_HEALTH +void ice_devlink_init_mdd_reporter(struct ice_pf *pf); +void ice_devlink_destroy_mdd_reporter(struct ice_pf *pf); +void ice_devlink_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, + u8 pf_num, u16 vf_num, u8 event, u16 queue); +void ice_devlink_clear_after_reset(struct ice_pf *pf); +#endif /* HAVE_DEVLINK_HEALTH */ + #endif /* _ICE_DEVLINK_H_ */ diff --git a/drivers/thirdparty/ice/ice_eswitch.c b/drivers/thirdparty/ice/ice_eswitch.c index d520934b420f..06fce565d775 100644 --- a/drivers/thirdparty/ice/ice_eswitch.c +++ b/drivers/thirdparty/ice/ice_eswitch.c @@ -11,6 +11,102 @@ #include "ice_pf_vsi_vlan_ops.h" #include "ice_tc_lib.h" +/** + * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC + * @pf: pointer to PF struct + * @vf: pointer to VF struct + * @mac: VF's MAC address + * + * This function adds advanced rule that forwards packets with + * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue. 
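+ *
+ * A minimal sketch of the queue selection this rule relies on (all names are
+ * taken from the function body below): the destination is the control VSI Rx
+ * queue reserved for this VF,
+ *
+ *   q_id = hw->func_caps.common_cap.rxq_first_id +
+ *          ctrl_vsi->rxq_map[vf->vf_id];
+ *
+ * Return: 0 on success, negative error code on failure.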
+ */ +int +ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac) +{ + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_adv_rule_info rule_info = {0}; + struct ice_adv_lkup_elem *list; + struct ice_hw *hw = &pf->hw; + const u16 lkups_cnt = 1; + int err; + + list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); + if (!list) + return -ENOMEM; + + list[0].type = ICE_MAC_OFOS; + ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac); + eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr); + + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.vsi_handle = ctrl_vsi->idx; + rule_info.sw_act.fltr_act = ICE_FWD_TO_Q; + rule_info.rx = false; + rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id + + ctrl_vsi->rxq_map[vf->vf_id]; + rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; + rule_info.flags_info.act_valid = true; + rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; + rule_info.add_dir_lkup = false; + + err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, + vf->repr->mac_rule); + if (err) + dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d", + vf->vf_id); + else + vf->repr->rule_added = true; + + kfree(list); + return err; +} + +/** + * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC + * @vf: pointer to vF struct + * + * This function replays VF's MAC rule after reset. + */ +void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) +{ + int err; + + if (!ice_is_switchdev_running(vf->pf)) + return; + + if (is_valid_ether_addr(vf->hw_lan_addr.addr)) { + err = ice_eswitch_add_vf_mac_rule(vf->pf, vf, + vf->hw_lan_addr.addr); + if (err) { + dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n", + vf->hw_lan_addr.addr, vf->vf_id, err); + return; + } + vf->num_mac++; + + ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr); + } +} + +/** + * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC + * @vf: pointer to the VF struct + * + * Delete the advanced rule that was used to forward packets with the VF's MAC + * address (src MAC) to the corresponding switchdev ctrl VSI queue. 
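+ *
+ * Based on the checks in the function body below, this is a no-op when
+ * switchdev is not running or when no rule was previously added for this VF
+ * (vf->repr->rule_added is false).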
+ */ +void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) +{ + if (!ice_is_switchdev_running(vf->pf)) + return; + + if (!vf->repr->rule_added) + return; + + ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule); + vf->repr->rule_added = false; +} + /** * ice_eswitch_setup_env - configure switchdev HW filters * @pf: pointer to PF struct @@ -21,8 +117,8 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) { struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; + struct net_device *uplink_netdev = uplink_vsi->netdev; struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - struct ice_port_info *pi = pf->hw.port_info; struct ice_vsi_vlan_ops *vlan_ops; bool rule_added = false; @@ -33,40 +129,33 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); + netif_addr_lock_bh(uplink_netdev); + __dev_uc_unsync(uplink_netdev, NULL); + __dev_mc_unsync(uplink_netdev, NULL); + netif_addr_unlock_bh(uplink_netdev); + if (ice_vsi_add_vlan_zero(uplink_vsi)) goto err_def_rx; - if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) { - if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi)) + if (!ice_is_vsi_dflt_vsi(uplink_vsi)) { + if (ice_set_dflt_vsi(uplink_vsi)) goto err_def_rx; rule_added = true; } - if (ice_cfg_dflt_vsi(pi, ctrl_vsi->idx, true, ICE_FLTR_TX)) - goto err_def_tx; - if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) goto err_override_uplink; if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) goto err_override_control; - if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id, - ICE_FLTR_TX, - ICE_SINGLE_ACT_LB_ENABLE)) - goto err_update_action; - return 0; -err_update_action: - ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); err_override_control: ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); err_override_uplink: - ice_cfg_dflt_vsi(pi, ctrl_vsi->idx, false, ICE_FLTR_TX); -err_def_tx: if (rule_added) - ice_clear_dflt_vsi(uplink_vsi->vsw); + ice_clear_dflt_vsi(uplink_vsi); err_def_rx: ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, @@ -86,12 +175,10 @@ ice_eswitch_release_env(struct ice_pf *pf) { struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - struct ice_port_info *pi = pf->hw.port_info; ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); - ice_cfg_dflt_vsi(pi, ctrl_vsi->idx, false, ICE_FLTR_TX); - ice_clear_dflt_vsi(uplink_vsi->vsw); + ice_clear_dflt_vsi(uplink_vsi); ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, ICE_FWD_TO_VSI); @@ -124,17 +211,28 @@ ice_eswitch_remap_ring(struct ice_ring *ring, struct ice_q_vector *q_vector, * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to * number of VFs. 
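+ *
+ * Per the loop below, control-VSI queue index q_id and VF ID q_id refer to
+ * the same VF, so each representor's q_vector ends up bound to exactly one
+ * Tx ring and one Rx ring of the control VSI. Callers must hold
+ * pf->vfs.table_lock (see the lockdep assertion in the function body).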
*/ -static void -ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) +static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) { struct ice_vsi *vsi = pf->switchdev.control_vsi; int q_id; + lockdep_assert_held(&pf->vfs.table_lock); + ice_for_each_txq(vsi, q_id) { - struct ice_repr *repr = pf->vf[q_id].repr; - struct ice_q_vector *q_vector = repr->q_vector; - struct ice_ring *tx_ring = vsi->tx_rings[q_id]; - struct ice_ring *rx_ring = vsi->rx_rings[q_id]; + struct ice_q_vector *q_vector; + struct ice_ring *tx_ring; + struct ice_ring *rx_ring; + struct ice_repr *repr; + struct ice_vf *vf; + + vf = ice_get_vf_by_id(pf, q_id); + if (WARN_ON(!vf)) + continue; + + repr = vf->repr; + q_vector = repr->q_vector; + tx_ring = vsi->tx_rings[q_id]; + rx_ring = vsi->rx_rings[q_id]; q_vector->vsi = vsi; q_vector->reg_idx = vsi->q_vectors[0]->reg_idx; @@ -150,6 +248,38 @@ ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) q_vector->num_ring_rx = 1; q_vector->rx.ring = rx_ring; ice_eswitch_remap_ring(rx_ring, q_vector, repr->netdev); + + ice_put_vf(vf); + } +} + +/** + * ice_eswitch_release_reprs - clear PR VSIs configuration + * @pf: poiner to PF struct + * @ctrl_vsi: pointer to switchdev control VSI + */ +static void +ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi) +{ + struct ice_vf *vf; + unsigned int bkt; + + lockdep_assert_held(&pf->vfs.table_lock); + + ice_for_each_vf(pf, bkt, vf) { + struct ice_vsi *vsi = vf->repr->src_vsi; + + /* Skip VFs that aren't configured */ + if (!vf->repr->dst) + continue; + + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + metadata_dst_free(vf->repr->dst); + vf->repr->dst = NULL; + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + + netif_napi_del(&vf->repr->q_vector->napi); } } @@ -161,11 +291,13 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf) { struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; int max_vsi_num = 0; - int i; + struct ice_vf *vf; + unsigned int bkt; - ice_for_each_vf(pf, i) { - struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; - struct ice_vf *vf = &pf->vf[i]; + lockdep_assert_held(&pf->vfs.table_lock); + + ice_for_each_vf(pf, bkt, vf) { + struct ice_vsi *vsi = vf->repr->src_vsi; ice_remove_vsi_fltr(&pf->hw, vsi->idx); vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, @@ -182,6 +314,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf) vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); metadata_dst_free(vf->repr->dst); + vf->repr->dst = NULL; goto err; } @@ -190,6 +323,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf) vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); metadata_dst_free(vf->repr->dst); + vf->repr->dst = NULL; ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); goto err; } @@ -203,20 +337,13 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf) netif_keep_dst(vf->repr->netdev); } - kfree(ctrl_vsi->target_netdevs); - - ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1, - sizeof(*ctrl_vsi->target_netdevs), - GFP_KERNEL); - if (!ctrl_vsi->target_netdevs) - goto err; - - ice_for_each_vf(pf, i) { - struct ice_repr *repr = pf->vf[i].repr; - struct ice_vsi *vsi = repr->src_vsi; + ice_for_each_vf(pf, bkt, vf) { struct metadata_dst *dst; + struct ice_repr *repr; + struct ice_vsi *vsi; - ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev; + repr = vf->repr; + vsi = repr->src_vsi; dst = repr->dst; dst->u.port_info.port_id = vsi->vsi_num; @@ -227,43 +354,11 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf) return 0; err: - for (i = i - 1; i >= 0; 
i--) { - struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; - struct ice_vf *vf = &pf->vf[i]; - - ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); - metadata_dst_free(vf->repr->dst); - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, - ICE_FWD_TO_VSI); - } + ice_eswitch_release_reprs(pf, ctrl_vsi); return -ENODEV; } -/** - * ice_eswitch_release_reprs - clear PR VSIs configuration - * @pf: poiner to PF struct - * @ctrl_vsi: pointer to switchdev control VSI - */ -static void ice_eswitch_release_reprs(struct ice_pf *pf, - struct ice_vsi *ctrl_vsi) -{ - int i; - - kfree(ctrl_vsi->target_netdevs); - ice_for_each_vf(pf, i) { - struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; - struct ice_vf *vf = &pf->vf[i]; - - ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); - metadata_dst_free(vf->repr->dst); - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, - ICE_FWD_TO_VSI); - - netif_napi_del(&vf->repr->q_vector->napi); - } -} - /** * ice_eswitch_update_repr - reconfigure VF port representor * @vsi: VF VSI for which port representor is configured @@ -278,16 +373,17 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi) if (!ice_is_switchdev_running(pf)) return; - vf = &pf->vf[vsi->vf_id]; + vf = vsi->vf; repr = vf->repr; repr->src_vsi = vsi; repr->dst->u.port_info.port_id = vsi->vsi_num; ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); if (ret) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); - dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id); - return; + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", + vf->vf_id); } } @@ -308,7 +404,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) np = netdev_priv(netdev); vsi = np->vsi; - if (ice_is_reset_in_progress(vsi->back->state)) + if (ice_is_reset_in_progress(vsi->back->state) || + test_bit(ICE_VF_DIS, vsi->back->state)) return NETDEV_TX_BUSY; repr = ice_netdev_to_repr(netdev); @@ -325,8 +422,9 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) * @skb: pointer to send buffer * @off: pointer to offload struct */ -void ice_eswitch_set_target_vsi(struct sk_buff *skb, - struct ice_tx_offload_params *off) +void +ice_eswitch_set_target_vsi(struct sk_buff *skb, + struct ice_tx_offload_params *off) { struct metadata_dst *dst = skb_metadata_dst(skb); u64 cd_cmd, dst_vsi; @@ -374,20 +472,22 @@ ice_eswitch_port_start_xmit(struct sk_buff __always_unused *skb, static struct ice_vsi * ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL, 0); + return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL, 0); } - /** * ice_eswitch_napi_del - remove NAPI handle for all port representors * @pf: pointer to PF structure */ static void ice_eswitch_napi_del(struct ice_pf *pf) { - int i; + struct ice_vf *vf; + unsigned int bkt; - ice_for_each_vf(pf, i) - netif_napi_del(&pf->vf[i].repr->q_vector->napi); + lockdep_assert_held(&pf->vfs.table_lock); + + ice_for_each_vf(pf, bkt, vf) + netif_napi_del(&vf->repr->q_vector->napi); } /** @@ -396,10 +496,13 @@ static void ice_eswitch_napi_del(struct ice_pf *pf) */ static void ice_eswitch_napi_enable(struct ice_pf *pf) { - int i; + struct ice_vf *vf; + unsigned int bkt; - ice_for_each_vf(pf, i) - napi_enable(&pf->vf[i].repr->q_vector->napi); + lockdep_assert_held(&pf->vfs.table_lock); + + 
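+	/* ice_for_each_vf() iterates the PF's VF table keyed by bucket (bkt);
+	 * holding pf->vfs.table_lock (asserted above) keeps the table stable
+	 * during the walk.
+	 */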
ice_for_each_vf(pf, bkt, vf) + napi_enable(&vf->repr->q_vector->napi); } /** @@ -408,39 +511,25 @@ static void ice_eswitch_napi_enable(struct ice_pf *pf) */ static void ice_eswitch_napi_disable(struct ice_pf *pf) { - int i; + struct ice_vf *vf; + unsigned int bkt; - ice_for_each_vf(pf, i) - napi_disable(&pf->vf[i].repr->q_vector->napi); -} + lockdep_assert_held(&pf->vfs.table_lock); -/** - * ice_eswitch_set_rxdid - configure rxdid on all rx queues from VSI - * @vsi: vsi to setup rxdid on - * @rxdid: flex descriptor id - */ -static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid) -{ - struct ice_hw *hw = &vsi->back->hw; - int i; - - ice_for_each_rxq(vsi, i) { - struct ice_ring *ring = vsi->rx_rings[i]; - u16 pf_q = vsi->rxq_map[ring->q_index]; - - ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true); - } + ice_for_each_vf(pf, bkt, vf) + napi_disable(&vf->repr->q_vector->napi); } /** * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode * @pf: pointer to PF structure */ -static int -ice_eswitch_enable_switchdev(struct ice_pf *pf) +static int ice_eswitch_enable_switchdev(struct ice_pf *pf) { struct ice_vsi *ctrl_vsi; + lockdep_assert_held(&pf->vfs.table_lock); + pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); if (!pf->switchdev.control_vsi) return -ENODEV; @@ -466,8 +555,6 @@ ice_eswitch_enable_switchdev(struct ice_pf *pf) ice_eswitch_napi_enable(pf); - ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2); - return 0; err_setup_reprs: @@ -487,10 +574,13 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf) { struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + lockdep_assert_held(&pf->vfs.table_lock); + ice_eswitch_napi_disable(pf); ice_eswitch_release_env(pf); - ice_vsi_release(ctrl_vsi); + ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx); ice_eswitch_release_reprs(pf, ctrl_vsi); + ice_vsi_release(ctrl_vsi); ice_repr_rem_from_all_vfs(pf); } @@ -502,8 +592,9 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf) * @mode: eswitch mode to switch to * @extack: pointer to extack structure */ -int ice_eswitch_mode_set(struct devlink *devlink, u16 mode, - struct netlink_ext_ack *extack) +int +ice_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) #else int ice_eswitch_mode_set(struct devlink *devlink, u16 mode) #endif /* HAVE_DEVLINK_ESWITCH_OPS_EXTACK */ @@ -513,15 +604,15 @@ int ice_eswitch_mode_set(struct devlink *devlink, u16 mode) if (pf->eswitch_mode == mode) return 0; - if (pf->num_alloc_vfs) { - dev_info(ice_pf_to_dev(pf), - "Changing eswitch mode is allowed only if there is no VFs created"); + if (ice_has_vfs(pf)) { + dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created"); return -EOPNOTSUPP; } switch (mode) { case DEVLINK_ESWITCH_MODE_LEGACY: - dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy", pf->hw.pf_id); + dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy", + pf->hw.pf_id); break; case DEVLINK_ESWITCH_MODE_SWITCHDEV: { @@ -532,15 +623,15 @@ int ice_eswitch_mode_set(struct devlink *devlink, u16 mode) } #endif /* NETIF_F_HW_TC */ -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS if (ice_is_offloaded_macvlan_ena(pf)) { dev_err(ice_pf_to_dev(pf), "switchdev cannot be configured - L2 Forwarding Offload is currently enabled.\n"); return -EOPNOTSUPP; } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ - dev_info(ice_pf_to_dev(pf), - "PF %d changed eswitch mode to switchdev", pf->hw.pf_id); + 
dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev", + pf->hw.pf_id); break; } default: @@ -557,34 +648,6 @@ int ice_eswitch_mode_set(struct devlink *devlink, u16 mode) } #endif /* HAVE_METADATA_PORT_INFO */ -/** - * ice_eswitch_get_target_netdev - return port representor netdev - * @rx_ring: pointer to rx ring - * @rx_desc: pointer to rx descriptor - * - * When working in switchdev mode context (when control vsi is used), this - * function returns netdev of appropriate port representor. For non-switchdev - * context, regular netdev associated with rx ring is returned. - */ -struct net_device * -ice_eswitch_get_target_netdev(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc) -{ - struct ice_32b_rx_flex_desc_nic_2 *desc; - struct ice_vsi *vsi = rx_ring->vsi; - struct ice_vsi *control_vsi; - u16 target_vsi_id; - - control_vsi = vsi->back->switchdev.control_vsi; - if (vsi != control_vsi) - return rx_ring->netdev; - - desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc; - target_vsi_id = le16_to_cpu(desc->src_vsi); - - return vsi->target_netdevs[target_vsi_id]; -} - /** * ice_eswitch_mode_get - get current eswitch mode * @devlink: pointer to devlink structure @@ -616,6 +679,8 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) */ void ice_eswitch_release(struct ice_pf *pf) { + lockdep_assert_held(&pf->vfs.table_lock); + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) return; @@ -631,6 +696,8 @@ int ice_eswitch_configure(struct ice_pf *pf) { int status; + lockdep_assert_held(&pf->vfs.table_lock); + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running) return 0; @@ -648,17 +715,17 @@ int ice_eswitch_configure(struct ice_pf *pf) */ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) { - struct ice_repr *repr; - int i; + struct ice_vf *vf; + unsigned int bkt; + + lockdep_assert_held(&pf->vfs.table_lock); if (test_bit(ICE_DOWN, pf->state)) return; - ice_for_each_vf(pf, i) { - repr = pf->vf[i].repr; - if (repr) - ice_repr_start_tx_queues(repr); - } + ice_for_each_vf(pf, bkt, vf) + if (vf->repr) + ice_repr_start_tx_queues(vf->repr); } /** @@ -667,17 +734,17 @@ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) */ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { - struct ice_repr *repr; - int i; + struct ice_vf *vf; + unsigned int bkt; + + lockdep_assert_held(&pf->vfs.table_lock); if (test_bit(ICE_DOWN, pf->state)) return; - ice_for_each_vf(pf, i) { - repr = pf->vf[i].repr; - if (repr) - ice_repr_stop_tx_queues(repr); - } + ice_for_each_vf(pf, bkt, vf) + if (vf->repr) + ice_repr_stop_tx_queues(vf->repr); } /** @@ -711,7 +778,6 @@ int ice_eswitch_rebuild(struct ice_pf *pf) return status; ice_eswitch_napi_enable(pf); - ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2); ice_eswitch_start_all_tx_queues(pf); return 0; diff --git a/drivers/thirdparty/ice/ice_eswitch.h b/drivers/thirdparty/ice/ice_eswitch.h index 1f8a39493cb7..e5c42899208a 100644 --- a/drivers/thirdparty/ice/ice_eswitch.h +++ b/drivers/thirdparty/ice/ice_eswitch.h @@ -11,10 +11,11 @@ int ice_eswitch_configure(struct ice_pf *pf); int ice_eswitch_rebuild(struct ice_pf *pf); int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); +int ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, + const u8 *mac); +void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf); +void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf); -struct net_device * 
-ice_eswitch_get_target_netdev(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc); #ifdef HAVE_METADATA_PORT_INFO void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off); @@ -57,6 +58,15 @@ static inline void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off) { } static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { } static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { } +static inline void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) { } +static inline void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) { } + +static inline int +ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, + const u8 *mac) +{ + return 0; +} static inline int ice_eswitch_configure(struct ice_pf *pf) @@ -81,12 +91,5 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) { return 0; } - -static inline struct net_device * -ice_eswitch_get_target_netdev(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc) -{ - return NULL; -} #endif /* CONFIG_NET_DEVLINK */ #endif diff --git a/drivers/thirdparty/ice/ice_ethtool.c b/drivers/thirdparty/ice/ice_ethtool.c index fb909ab649db..00e1b9bfad56 100644 --- a/drivers/thirdparty/ice/ice_ethtool.c +++ b/drivers/thirdparty/ice/ice_ethtool.c @@ -31,7 +31,7 @@ static int ice_q_stats_len(struct net_device *netdev) stats_size = sizeof(struct ice_q_vector_ch_stats); total_slen += np->vsi->alloc_rxq * (stats_size / sizeof(u64)); #ifdef ICE_ADD_PROBES -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS stats_size = sizeof(struct ice_q_stats); total_slen += (ICE_MAX_MACVLANS * 2) * (stats_size / sizeof(u64)); @@ -39,7 +39,7 @@ static int ice_q_stats_len(struct net_device *netdev) * the count by that many so the stats get printed correctly */ total_slen -= ICE_MAX_MACVLANS * 2; -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #endif /* ICE_ADD_PROBES */ #else stats_size = sizeof(struct ice_q_stats); @@ -47,13 +47,13 @@ static int ice_q_stats_len(struct net_device *netdev) total_slen += np->vsi->alloc_txq * (stats_size / sizeof(u64)); total_slen += np->vsi->alloc_rxq * (stats_size / sizeof(u64)); #ifdef ICE_ADD_PROBES -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS total_slen += (ICE_MAX_MACVLANS * 2) * (stats_size / sizeof(u64)); /* the napi_poll_cnt isn't included in the MACVLAN stats so reduce * the count by that many so the stats get printed correctly */ total_slen -= ICE_MAX_MACVLANS * 2; -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #endif /* ICE_ADD_PROBES */ #endif /* ADQ_PERF_COUNTERS */ #ifdef HAVE_XDP_SUPPORT @@ -207,6 +207,11 @@ static const struct ice_stats ice_gstrings_pf_stats[] = { ICE_PF_STAT(ICE_PORT_ARFS_UDP4_MATCH, stats.arfs_udpv4_match), ICE_PF_STAT(ICE_PORT_ARFS_UDP6_MATCH, stats.arfs_udpv6_match), #endif /* ICE_ADD_PROBES */ + ICE_PF_STAT(ICE_TX_HWTSTAMP_SKIPPED, ptp.tx_hwtstamp_skipped), + ICE_PF_STAT(ICE_TX_HWTSTAMP_TIMEOUTS, ptp.tx_hwtstamp_timeouts), + ICE_PF_STAT(ICE_TX_HWTSTAMP_FLUSHED, ptp.tx_hwtstamp_flushed), + ICE_PF_STAT(ICE_TX_HWTSTAMP_DISCARDED, ptp.tx_hwtstamp_discarded), + ICE_PF_STAT(ICE_LATE_CACHED_PHC_UPDATES, ptp.late_cached_phc_updates), }; static const u32 ice_regs_dump_list[] = { @@ -237,8 +242,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = { #endif /* !ETHTOOL_GFECPARAM */ ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT), #ifdef NETIF_F_HW_TC - ICE_PRIV_FLAG("channel-inline-flow-director", - ICE_FLAG_CHNL_INLINE_FD_ENA), 
ICE_PRIV_FLAG("channel-inline-fd-mark", ICE_FLAG_CHNL_INLINE_FD_MARK_ENA), ICE_PRIV_FLAG("channel-pkt-inspect-optimize", @@ -251,8 +254,14 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = { ICE_PRIV_FLAG("vf-true-promisc-support", ICE_FLAG_VF_TRUE_PROMISC_ENA), ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF), - ICE_PRIV_FLAG("vf-vlan-prune-disable", ICE_FLAG_VF_VLAN_PRUNE_DIS), + ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING), ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX), + /* Flag enable/disable monitoring DPLL Admin Queue error events */ + ICE_PRIV_FLAG("cgu_fast_lock", ICE_FLAG_DPLL_FAST_LOCK), + ICE_PRIV_FLAG("dpll_monitor", ICE_FLAG_DPLL_MONITOR), + ICE_PRIV_FLAG("extts_filter", ICE_FLAG_EXTTS_FILTER), + ICE_PRIV_FLAG("allow-no-fec-modules-in-auto", + ICE_FLAG_ALLOW_FEC_DIS_AUTO), }; #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) @@ -279,48 +288,21 @@ __ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo, "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor, nvm->eetrack, orom->major, orom->build, orom->patch); - /* When called via 'ethtool -i|--driver ', log the above with - * additional Netlist version information as a kernel message since it - * will not all fit in the 32-byte fixed-length buffer. - */ - if (!strncmp(current->comm, "ethtool", 7)) { - struct ice_netlist_info *netlist = &hw->flash.netlist; - - /* The netlist versions are stored in packed BCD format */ - netdev_info(netdev, "NVM version details - %x.%02x, 0x%x, %x.%x.%x-%x.%x.%x.%08x, %d.%d.%d\n", - nvm->major, nvm->minor, nvm->eetrack, - netlist->major, netlist->minor, - netlist->type >> 16, netlist->type & 0xffff, - netlist->rev, netlist->cust_ver, netlist->hash, - orom->major, orom->build, orom->patch); - } - strscpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); - - if (test_bit(ICE_RECOVERY_MODE, pf->state)) - return; - - drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE; } static void ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; __ice_get_drvinfo(netdev, drvinfo, np->vsi); -} - -static void -ice_repr_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) -{ - struct ice_repr *repr = ice_netdev_to_repr(netdev); - - if (ice_check_vf_ready_for_cfg(repr->vf)) + if (test_bit(ICE_RECOVERY_MODE, pf->state)) return; - __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi); + drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE; } static int ice_get_regs_len(struct net_device __always_unused *netdev) @@ -389,18 +371,17 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct device *dev; - int ret = 0; u32 magic; + int ret; u8 *buf; dev = ice_pf_to_dev(pf); magic = hw->vendor_id | (hw->device_id << 16); if (eeprom->magic && eeprom->magic != magic) { - struct ice_nvm_access_cmd *nvm; union ice_nvm_access_data *data; + struct ice_nvm_access_cmd *nvm; nvm = (struct ice_nvm_access_cmd *)eeprom; data = (union ice_nvm_access_data *)bytes; @@ -408,18 +389,16 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, netdev_dbg(netdev, "GEEPROM config 0x%08x, offset 0x%08x, data_size 0x%08x\n", nvm->config, nvm->offset, nvm->data_size); - status = ice_handle_nvm_access(hw, nvm, data); + ret = ice_handle_nvm_access(hw, nvm, data); 
ice_debug_array(hw, ICE_DBG_NVM, 16, 1, (u8 *)data, nvm->data_size); - if (status) { - int err = ice_status_to_errno(status); + if (ret) { + netdev_err(netdev, "NVM read offset 0x%x failed with error %d\n", + nvm->offset, ret); - netdev_err(netdev, "NVM read offset 0x%x failed with status %s, error %d\n", - nvm->offset, ice_stat_str(status), err); - - return err; + return ret; } return 0; @@ -433,22 +412,18 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, if (!buf) return -ENOMEM; - status = ice_acquire_nvm(hw, ICE_RES_READ); - if (status) { - dev_err(dev, "ice_acquire_nvm failed: %s %s\n", - ice_stat_str(status), + ret = ice_acquire_nvm(hw, ICE_RES_READ); + if (ret) { + dev_err(dev, "ice_acquire_nvm failed: %d %s\n", ret, ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; goto out; } - status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf, - false); - if (status) { - dev_err(dev, "ice_read_flat_nvm failed: %s %s\n", - ice_stat_str(status), + ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf, + false); + if (ret) { + dev_err(dev, "ice_read_flat_nvm failed: %d %s\n", ret, ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; goto release; } @@ -469,9 +444,8 @@ ice_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, struct ice_pf *pf = np->vsi->back; union ice_nvm_access_data *data; struct ice_nvm_access_cmd *nvm; - enum ice_status status = 0; - int err = 0; u32 magic; + int err; /* normal ethtool set_eeprom is not supported */ nvm = (struct ice_nvm_access_cmd *)eeprom; @@ -490,13 +464,11 @@ ice_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, else if (ice_is_reset_in_progress(pf->state)) err = -EBUSY; else - status = ice_handle_nvm_access(hw, nvm, data); + err = ice_handle_nvm_access(hw, nvm, data); - if (status) { - err = ice_status_to_errno(status); - netdev_err(netdev, "NVM write offset 0x%x failed with status %s, error %d\n", - nvm->offset, ice_stat_str(status), err); - } + if (err) + netdev_err(netdev, "NVM write offset 0x%x failed with error %d\n", + nvm->offset, err); return err; } @@ -509,16 +481,20 @@ ice_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, */ static bool ice_active_vfs(struct ice_pf *pf) { - unsigned int i; + bool active = false; + struct ice_vf *vf; + unsigned int bkt; - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; - - if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) - return true; + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { + if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + active = true; + break; + } } + rcu_read_unlock(); - return false; + return active; } /** @@ -531,14 +507,14 @@ static bool ice_active_vfs(struct ice_pf *pf) static u64 ice_link_test(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); - enum ice_status status; bool link_up = false; + int status; netdev_info(netdev, "link test\n"); status = ice_get_link_status(np->vsi->port_info, &link_up); if (status) { - netdev_err(netdev, "link query error, status = %s\n", - ice_stat_str(status)); + netdev_err(netdev, "link query error, status = %d\n", + status); return 1; } @@ -682,7 +658,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi) if (status) goto err_start_rx_ring; - return status; + return 0; err_start_rx_ring: ice_vsi_free_rx_rings(vsi); @@ -850,7 +826,8 @@ static int ice_lbtest_receive_frames(struct ice_ring *rx_ring) rx_desc = ICE_RX_DESC(rx_ring, i); if (!(rx_desc->wb.status_error0 & - cpu_to_le16(ICE_TX_DESC_CMD_EOP | 
ICE_TX_DESC_CMD_RS))) + (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) | + cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))))) continue; rx_buf = &rx_ring->rx_buf[i]; @@ -1042,8 +1019,9 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, int status = ice_open(netdev); if (status) { - dev_err(dev, "Could not open device %s, err %d\n", - pf->int_name, status); + ice_dev_err_errno(dev, status, + "Could not open device %s", + pf->int_name); } } } else { @@ -1541,7 +1519,7 @@ ice_get_chnl_rx_strings(struct ice_vsi *vsi, unsigned int q, char **loc_in_buf) #endif /* ADQ_PERF_COUNTERS */ #ifdef ICE_ADD_PROBES -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS /** * ice_get_macvlan * @id: macvlan ID @@ -1707,13 +1685,13 @@ ice_get_macvlan_rx_stats(struct ice_pf *pf, u64 *data, int *idx) /* copy back updated index */ *idx = j; } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #endif /* ICE_ADD_PROBES */ -static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +static void +__ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, + struct ice_vsi *vsi) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = ice_get_netdev_priv_vsi(np); char *p = (char *)data; unsigned int i; @@ -1746,9 +1724,9 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) #endif /* ICE_ADD_PROBES */ #endif /* HAVE_XDP_SUPPORT */ #ifdef ICE_ADD_PROBES -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS ice_get_macvlan_tx_strings(vsi->back, &p); -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #endif /* ICE_ADD_PROBES */ ice_for_each_alloc_rxq(vsi, i) { @@ -1770,9 +1748,9 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) #endif /* HAVE_XDP_SUPPORT */ } #ifdef ICE_ADD_PROBES -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS ice_get_macvlan_rx_strings(vsi->back, &p); -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #endif /* ICE_ADD_PROBES */ if (vsi->type != ICE_VSI_PF) @@ -1812,6 +1790,13 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } } +static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + __ice_get_strings(netdev, stringset, data, np->vsi); +} + static int ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { @@ -1890,11 +1875,16 @@ ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; enum ice_fec_mode fec; switch (fecparam->fec) { case ETHTOOL_FEC_AUTO: - fec = ICE_FEC_AUTO; + if (ice_fw_supports_fec_dis_auto(&pf->hw) && + test_bit(ICE_FLAG_ALLOW_FEC_DIS_AUTO, pf->flags)) + fec = ICE_FEC_DIS_AUTO; + else + fec = ICE_FEC_AUTO; break; case ETHTOOL_FEC_RS: fec = ICE_FEC_RS; @@ -1907,7 +1897,7 @@ ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) fec = ICE_FEC_NONE; break; default: - dev_warn(ice_pf_to_dev(vsi->back), "Unsupported FEC mode: %d\n", + dev_warn(ice_pf_to_dev(pf), "Unsupported FEC mode: %d\n", fecparam->fec); return -EINVAL; } @@ -1928,8 +1918,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) struct ice_link_status *link_info; struct ice_vsi *vsi = np->vsi; struct ice_port_info *pi; - enum ice_status status; - int err = 0; + int err; pi = vsi->port_info; @@ -1955,12 
+1944,10 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) if (!caps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - caps, NULL); - if (status) { - err = -EAGAIN; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + caps, NULL); + if (err) goto done; - } /* Set supported/configured FEC modes based on PHY capability */ if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC) @@ -2080,12 +2067,6 @@ static void ice_recfg_chnl_vsis(struct ice_pf *pf, struct ice_vsi *vsi) /* set/clear inline flow-director bits for ADQ (aka channel) * VSIs based on PF level private flags */ - if (test_bit(ICE_FLAG_CHNL_INLINE_FD_ENA, pf->flags)) - set_bit(ICE_CHNL_FEATURE_INLINE_FD_ENA, - ch_vsi->features); - else - clear_bit(ICE_CHNL_FEATURE_INLINE_FD_ENA, - ch_vsi->features); if (test_bit(ICE_FLAG_CHNL_INLINE_FD_MARK_ENA, pf->flags)) set_bit(ICE_CHNL_FEATURE_INLINE_FD_MARK_ENA, ch_vsi->features); @@ -2126,7 +2107,7 @@ static void ice_recfg_vsi(struct ice_pf *pf, struct ice_vsi *vsi) ICE_MAX_LIMIT_PROCESS_RX_PKTS_DFLT; } } -#endif /* ADQ_SUPPORT */ +#endif /* NETIF_F_HW_TC */ /** * ice_set_priv_flags - set private flags @@ -2142,6 +2123,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) struct ice_pf *pf = vsi->back; struct device *dev; int ret = 0; + int status; u32 i; if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE)) @@ -2165,7 +2147,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) #ifdef NETIF_F_HW_TC ice_recfg_chnl_vsis(pf, vsi); ice_recfg_vsi(pf, vsi); -#endif /* ADQ_SUPPORT */ +#endif /* NETIF_F_HW_TC */ bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS); @@ -2173,7 +2155,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) if (test_bit(ICE_FLAG_RS_FEC, change_flags) || test_bit(ICE_FLAG_BASE_R_FEC, change_flags)) { enum ice_fec_mode fec = ICE_FEC_NONE; - int err; /* Check if FEC is supported */ if (pf->hw.device_id != ICE_DEV_ID_E810C_BACKPLANE && @@ -2186,17 +2167,21 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) /* Set FEC configuration */ if (test_bit(ICE_FLAG_RS_FEC, pf->flags) && - test_bit(ICE_FLAG_BASE_R_FEC, pf->flags)) - fec = ICE_FEC_AUTO; - else if (test_bit(ICE_FLAG_RS_FEC, pf->flags)) + test_bit(ICE_FLAG_BASE_R_FEC, pf->flags)) { + if (ice_fw_supports_fec_dis_auto(&pf->hw) && + test_bit(ICE_FLAG_ALLOW_FEC_DIS_AUTO, pf->flags)) + fec = ICE_FEC_DIS_AUTO; + else + fec = ICE_FEC_AUTO; + } else if (test_bit(ICE_FLAG_RS_FEC, pf->flags)) fec = ICE_FEC_RS; else if (test_bit(ICE_FLAG_BASE_R_FEC, pf->flags)) fec = ICE_FEC_BASER; - err = ice_set_fec_cfg(netdev, fec); + ret = ice_set_fec_cfg(netdev, fec); /* If FEC configuration fails, restore original FEC flags */ - if (err) { + if (ret) { if (test_bit(ICE_FLAG_BASE_R_FEC, orig_flags)) set_bit(ICE_FLAG_BASE_R_FEC, pf->flags); else @@ -2207,7 +2192,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) else clear_bit(ICE_FLAG_RS_FEC, pf->flags); - ret = err; goto ethtool_exit; } } @@ -2226,8 +2210,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) { if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) { - enum ice_status status; - /* Disable FW LLDP engine */ status = ice_cfg_lldp_mib_change(&pf->hw, false); @@ -2255,7 +2237,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED; pf->dcbx_cap |= DCB_CAP_DCBX_HOST; } else { - enum 
ice_status status; bool dcbx_agent_status; #ifdef NETIF_F_HW_TC @@ -2323,27 +2304,115 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) } if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) { /* down and up VSI so that changes of Rx cfg are reflected. */ - ice_down(vsi); - ice_up(vsi); + ice_down_up(vsi); } /* don't allow modification of this flag when a single VF is in * promiscuous mode because it's not supported */ if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) && - ice_is_any_vf_in_promisc(pf)) { + ice_is_any_vf_in_unicast_promisc(pf)) { dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n"); /* toggle bit back to previous state */ change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags); ret = -EAGAIN; } - if (test_bit(ICE_FLAG_VF_VLAN_PRUNE_DIS, change_flags) && - pf->num_alloc_vfs) { - dev_err(dev, "Changing vf-vlan-prune-disable flag while VF(s) are active is not supported\n"); + if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) && + ice_has_vfs(pf)) { + dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n"); /* toggle bit back to previous state */ - change_bit(ICE_FLAG_VF_VLAN_PRUNE_DIS, change_flags); + change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags); ret = -EOPNOTSUPP; } + if (!test_bit(ICE_FLAG_DPLL_MONITOR, pf->flags) && + pf->synce_dpll_state != ICE_CGU_STATE_UNKNOWN) { + pf->synce_dpll_state = ICE_CGU_STATE_UNKNOWN; + pf->ptp_dpll_state = ICE_CGU_STATE_UNKNOWN; + } + if (test_bit(ICE_FLAG_DPLL_MONITOR, change_flags)) { + if (!ice_is_feature_supported(pf, ICE_F_CGU)) { + dev_err(dev, "dpll_monitor: not supported\n"); + /* toggle bit back to previous state */ + change_bit(ICE_FLAG_DPLL_MONITOR, pf->flags); + ret = -EOPNOTSUPP; + goto ethtool_exit; + } + } + if (test_bit(ICE_FLAG_DPLL_FAST_LOCK, change_flags)) { + u8 ref_state, eec_mode, config; + bool fast_lock_enabled; + u64 phase_offset; + u16 dpll_state; + + if (!ice_is_feature_supported(pf, ICE_F_CGU)) { + dev_err(dev, "cgu-fast-lock: not supported\n"); + /* toggle bit back to previous state */ + change_bit(ICE_FLAG_DPLL_FAST_LOCK, pf->flags); + ret = -EOPNOTSUPP; + goto ethtool_exit; + } + + status = ice_aq_get_cgu_dpll_status(&pf->hw, ICE_CGU_DPLL_PTP, + &ref_state, &dpll_state, + &phase_offset, &eec_mode); + if (status) { + dev_err(dev, "cgu-fast-lock: fail to read current DPLL state.\n"); + /* toggle bit back to previous state */ + change_bit(ICE_FLAG_DPLL_FAST_LOCK, pf->flags); + goto ethtool_exit; + } + + config = dpll_state & + (ICE_AQC_GET_CGU_DPLL_STATUS_STATE_MODE | + ICE_AQC_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SEL) + >> ICE_AQC_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT; + fast_lock_enabled = test_bit(ICE_FLAG_DPLL_FAST_LOCK, pf->flags); + if (fast_lock_enabled) + ref_state |= ICE_AQC_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN; + else + ref_state &= !ICE_AQC_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN; + status = ice_aq_set_cgu_dpll_config(&pf->hw, ICE_CGU_DPLL_PTP, + ref_state, config, + eec_mode); + if (status) { + dev_err(dev, "cgu-fast-lock: fail to set DPLL clock controller.\n"); + /* toggle bit back to previous state */ + change_bit(ICE_FLAG_DPLL_FAST_LOCK, pf->flags); + goto ethtool_exit; + } + + dev_info(dev, "cgu-fast-lock: %s FAST LOCK for PPS DPLL", + fast_lock_enabled ? 
"enabled" : "disabled"); + } + + if (test_bit(ICE_FLAG_ALLOW_FEC_DIS_AUTO, change_flags)) { + enum ice_fec_mode fec = ICE_FEC_AUTO; + + if (!ice_fw_supports_fec_dis_auto(&pf->hw)) { + netdev_info(vsi->netdev, "Unsupported Firmware to Enable/Disable auto configuration of No FEC modules\n"); + change_bit(ICE_FLAG_ALLOW_FEC_DIS_AUTO, pf->flags); + ret = -EOPNOTSUPP; + goto ethtool_exit; + } + + /* Set FEC configuration */ + if (test_bit(ICE_FLAG_ALLOW_FEC_DIS_AUTO, pf->flags)) + fec = ICE_FEC_DIS_AUTO; + + ret = ice_set_fec_cfg(netdev, fec); + + /* If FEC configuration fails, restore original FEC flags */ + if (ret) { + netdev_warn(vsi->netdev, "Failed to Enable/Disable auto configuration of No FEC modules\n"); + change_bit(ICE_FLAG_ALLOW_FEC_DIS_AUTO, pf->flags); + goto ethtool_exit; + } + + if (test_bit(ICE_FLAG_ALLOW_FEC_DIS_AUTO, pf->flags)) + netdev_info(vsi->netdev, "Enabled auto configuration of No FEC modules\n"); + else + netdev_info(vsi->netdev, "Auto configuration of No FEC modules reset to NVM defaults\n"); + } ethtool_exit: clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); return ret; @@ -2371,10 +2440,7 @@ static int ice_get_sset_count(struct net_device *netdev, int sset) * order of strings will suffer from race conditions and are * not safe. */ - if (ice_is_port_repr_netdev(netdev)) - return ICE_VSI_STATS_LEN; - else - return ICE_ALL_STATS_LEN(netdev); + return ICE_ALL_STATS_LEN(netdev); case ETH_SS_TEST: return ICE_TEST_LEN; case ETH_SS_PRIV_FLAGS: @@ -2385,11 +2451,10 @@ static int ice_get_sset_count(struct net_device *netdev, int sset) } static void -ice_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats __always_unused *stats, u64 *data) +__ice_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, u64 *data, + struct ice_vsi *vsi) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = ice_get_netdev_priv_vsi(np); struct ice_pf *pf = vsi->back; struct ice_ring *ring; unsigned int j; @@ -2434,9 +2499,9 @@ ice_get_ethtool_stats(struct net_device *netdev, #endif /* ICE_ADD_PROBES */ #endif /* HAVE_XDP_SUPPORT */ #ifdef ICE_ADD_PROBES -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS ice_get_macvlan_tx_stats(vsi->back, data, &i); -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #endif /* ICE_ADD_PROBES */ ice_for_each_alloc_rxq(vsi, j) { @@ -2469,9 +2534,9 @@ ice_get_ethtool_stats(struct net_device *netdev, } } #ifdef ICE_ADD_PROBES -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS ice_get_macvlan_rx_stats(vsi->back, data, &i); -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #endif /* ICE_ADD_PROBES */ rcu_read_unlock(); @@ -2496,6 +2561,15 @@ ice_get_ethtool_stats(struct net_device *netdev, } } +static void +ice_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, u64 *data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + __ice_get_ethtool_stats(netdev, stats, data, np->vsi); +} + #define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \ ICE_PHY_TYPE_LOW_100M_SGMII) @@ -2538,52 +2612,58 @@ ice_get_ethtool_stats(struct net_device *netdev, ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \ ICE_PHY_TYPE_HIGH_100G_AUI2) -#ifdef HAVE_ETHTOOL_100G_BITS /** * ice_mask_min_supported_speeds + * @hw: pointer to the HW structure * @phy_types_high: PHY type high * @phy_types_low: PHY type low to apply minimum supported speeds mask * * Apply minimum supported speeds mask to PHY type low. 
These are the speeds * for ethtool supported link mode. */ -static -void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low) +#ifdef HAVE_ETHTOOL_100G_BITS +static void +ice_mask_min_supported_speeds(struct ice_hw *hw, u64 phy_types_high, + u64 *phy_types_low) #else -static void ice_mask_min_supported_speeds(u64 *phy_types_low) +static void ice_mask_min_supported_speeds(struct ice_hw *hw, u64 *phy_types_low) #endif /* !HAVE_ETHTOOL_100G_BITS */ { /* if QSFP connection with 100G speed, minimum supported speed is 25G */ #ifdef HAVE_ETHTOOL_100G_BITS if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G || phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) -#else /* HAVE_ETHTOOL_100G_BITS */ +#else /* !HAVE_ETHTOOL_100G_BITS */ if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G) -#endif /* !HAVE_ETHTOOL_100G_BITS */ +#endif /* HAVE_ETHTOOL_100G_BITS */ *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G; - else + else if (!ice_is_100m_speed_supported(hw)) *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G; } -#ifdef HAVE_ETHTOOL_100G_BITS -#define ice_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode) \ - do { \ - if (req_speeds & (aq_link_speed) || \ - (!req_speeds && \ - (advert_phy_type_lo & phy_type_mask_lo || \ - advert_phy_type_hi & phy_type_mask_hi))) \ - ethtool_link_ksettings_add_link_mode(ks, advertising,\ - ethtool_link_mode); \ - } while (0) -#else /* HAVE_ETHTOOL_100G_BITS */ -#define ice_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode) \ - do { \ - if (req_speeds & (aq_link_speed) || \ - (req_speeds && advert_phy_type_lo & phy_type_mask_lo)) \ - ethtool_link_ksettings_add_link_mode(ks, advertising,\ - ethtool_link_mode); \ - } while (0) -#endif /* ! HAVE_ETHTOOL_100G_BITS */ +/** + * ice_linkmode_set_bit - if supported, set link mode bit + * @phy_to_ethtool: PHY type to ethtool link mode struct to set + * @ks: ethtool link ksettings struct to fill out + * @req_speeds: speed requested by user + * @advert_phy_type: advertised PHY type + * @phy_type: PHY type + */ +static void +ice_linkmode_set_bit(struct ice_phy_type_to_ethtool *phy_to_ethtool, + struct ethtool_link_ksettings *ks, u16 req_speeds, + u64 advert_phy_type, u8 phy_type) +{ + if (!phy_to_ethtool->ethtool_link_mode_supported) + return; + + linkmode_set_bit(phy_to_ethtool->link_mode, ks->link_modes.supported); + + if (req_speeds & phy_to_ethtool->aq_link_speed || + (!req_speeds && advert_phy_type & BIT(phy_type))) + linkmode_set_bit(phy_to_ethtool->link_mode, + ks->link_modes.advertising); +} /** * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes @@ -2597,10 +2677,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev, struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; - u64 phy_type_mask_lo = 0; -#ifdef HAVE_ETHTOOL_100G_BITS - u64 phy_type_mask_hi = 0; -#endif /* HAVE_ETHTOOL_100G_BITS */ u64 advert_phy_type_lo = 0; #ifdef HAVE_ETHTOOL_100G_BITS u64 advert_phy_type_hi = 0; @@ -2608,6 +2684,7 @@ ice_phy_type_to_ethtool(struct net_device *netdev, #endif /* HAVE_ETHTOOL_100G_BITS */ u64 phy_types_low = 0; u16 req_speeds; + u8 phy_type; req_speeds = vsi->port_info->phy.link_info.req_speeds; @@ -2626,9 +2703,10 @@ ice_phy_type_to_ethtool(struct net_device *netdev, #ifdef HAVE_ETHTOOL_100G_BITS phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi); - ice_mask_min_supported_speeds(phy_types_high, &phy_types_low); + ice_mask_min_supported_speeds(&pf->hw, phy_types_high, + &phy_types_low); #else /* HAVE_ETHTOOL_100G_BITS */ - 
ice_mask_min_supported_speeds(&phy_types_low); + ice_mask_min_supported_speeds(&pf->hw, &phy_types_low); #endif /* !HAVE_ETHTOOL_100G_BITS */ /* determine advertised modes based on link override only * if it's supported and if the FW doesn't abstract the @@ -2676,299 +2754,19 @@ ice_phy_type_to_ethtool(struct net_device *netdev, advert_phy_type_lo = vsi->port_info->phy.phy_type_low; #endif /* !HAVE_ETHTOOL_100G_BITS */ - ethtool_link_ksettings_zero_link_mode(ks, supported); - ethtool_link_ksettings_zero_link_mode(ks, advertising); + linkmode_zero(ks->link_modes.supported); + linkmode_zero(ks->link_modes.advertising); - phy_type_mask_lo = ICE_PHY_TYPE_LOW_100BASE_TX | - ICE_PHY_TYPE_LOW_100M_SGMII; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 100baseT_Full); + for_each_set_bit(phy_type, (const unsigned long *)&phy_types_low, + ICE_PHY_TYPE_LOW_SIZE) + ice_linkmode_set_bit(&phy_type_low_lkup[phy_type], ks, + req_speeds, advert_phy_type_lo, phy_type); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100MB, - 100baseT_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_T | - ICE_PHY_TYPE_LOW_1G_SGMII; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 1000baseT_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, - 1000baseT_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_KX; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 1000baseKX_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, - 1000baseKX_Full); - } -#ifdef HAVE_ETHTOOL_NEW_1G_BITS - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_SX | - ICE_PHY_TYPE_LOW_1000BASE_LX; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 1000baseX_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, - 1000baseX_Full); - } -#else - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_SX | - ICE_PHY_TYPE_LOW_1000BASE_LX; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 1000baseT_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, - 1000baseT_Full); - } -#endif /* HAVE_ETHTOOL_NEW_1G_BITS */ -#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_T; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 2500baseT_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB, - 2500baseT_Full); - } -#else - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_T; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 2500baseX_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB, - 2500baseX_Full); - } -#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */ - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_X | - ICE_PHY_TYPE_LOW_2500BASE_KX; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 2500baseX_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB, - 2500baseX_Full); - } -#ifdef HAVE_ETHTOOL_5G_BITS - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_5GBASE_T | - ICE_PHY_TYPE_LOW_5GBASE_KR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 5000baseT_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_5GB, - 5000baseT_Full); - } -#endif /* HAVE_ETHTOOL_5G_BITS */ - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_T | - 
ICE_PHY_TYPE_LOW_10G_SFI_DA | - ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 10000baseT_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, - 10000baseT_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | - ICE_PHY_TYPE_LOW_10G_SFI_C2C; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 10000baseKR_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, - 10000baseKR_Full); - } -#ifdef HAVE_ETHTOOL_NEW_10G_BITS - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_SR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 10000baseSR_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, - 10000baseSR_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_LR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 10000baseLR_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, - 10000baseLR_Full); - } -#else - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_SR | - ICE_PHY_TYPE_LOW_10GBASE_LR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 10000baseT_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, - 10000baseT_Full); - } -#endif /* HAVE_ETHTOOL_NEW_10G_BITS */ -#ifdef HAVE_ETHTOOL_25G_BITS - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_T | - ICE_PHY_TYPE_LOW_25GBASE_CR | - ICE_PHY_TYPE_LOW_25GBASE_CR_S | - ICE_PHY_TYPE_LOW_25GBASE_CR1 | - ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 25000baseCR_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB, - 25000baseCR_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_SR | - ICE_PHY_TYPE_LOW_25GBASE_LR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 25000baseSR_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB, - 25000baseSR_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_KR | - ICE_PHY_TYPE_LOW_25GBASE_KR_S | - ICE_PHY_TYPE_LOW_25GBASE_KR1 | - ICE_PHY_TYPE_LOW_25G_AUI_C2C; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 25000baseKR_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB, - 25000baseKR_Full); - } -#endif /* HAVE_ETHTOOL_25G_BITS */ - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_KR4; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 40000baseKR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, - 40000baseKR4_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_CR4 | - ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | - ICE_PHY_TYPE_LOW_40G_XLAUI; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 40000baseCR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, - 40000baseCR4_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_SR4; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 40000baseSR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, - 40000baseSR4_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_LR4; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 40000baseLR4_Full); - 
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, - 40000baseLR4_Full); - } -#ifdef HAVE_ETHTOOL_50G_BITS - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_CR2 | - ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | - ICE_PHY_TYPE_LOW_50G_LAUI2 | - ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | - ICE_PHY_TYPE_LOW_50G_AUI2 | - ICE_PHY_TYPE_LOW_50GBASE_CP | - ICE_PHY_TYPE_LOW_50GBASE_SR | - ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | - ICE_PHY_TYPE_LOW_50G_AUI1; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 50000baseCR2_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB, - 50000baseCR2_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_KR2 | - ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 50000baseKR2_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB, - 50000baseKR2_Full); - } -#endif /* HAVE_ETHTOOL_50G_BITS */ -#ifdef HAVE_ETHTOOL_NEW_50G_BITS - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_SR2 | - ICE_PHY_TYPE_LOW_50GBASE_LR2 | - ICE_PHY_TYPE_LOW_50GBASE_FR | - ICE_PHY_TYPE_LOW_50GBASE_LR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 50000baseSR2_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB, - 50000baseSR2_Full); - } -#endif /* HAVE_ETHTOOL_NEW_50G_BITS */ #ifdef HAVE_ETHTOOL_100G_BITS - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_CR4 | - ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | - ICE_PHY_TYPE_LOW_100G_CAUI4 | - ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | - ICE_PHY_TYPE_LOW_100G_AUI4 | - ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | - ICE_PHY_TYPE_LOW_100GBASE_CP2; - phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | - ICE_PHY_TYPE_HIGH_100G_CAUI2 | - ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | - ICE_PHY_TYPE_HIGH_100G_AUI2; - if (phy_types_low & phy_type_mask_lo || - phy_types_high & phy_type_mask_hi) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 100000baseCR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, - 100000baseCR4_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_SR4 | - ICE_PHY_TYPE_LOW_100GBASE_SR2; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 100000baseSR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, - 100000baseSR4_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 | - ICE_PHY_TYPE_LOW_100GBASE_DR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 100000baseLR4_ER4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, - 100000baseLR4_ER4_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 | - ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4; - phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4; - if (phy_types_low & phy_type_mask_lo || - phy_types_high & phy_type_mask_hi) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 100000baseKR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, - 100000baseKR4_Full); - } + for_each_set_bit(phy_type, (const unsigned long *)&phy_types_high, + ICE_PHY_TYPE_HIGH_SIZE) + ice_linkmode_set_bit(&phy_type_high_lkup[phy_type], ks, + req_speeds, advert_phy_type_hi, phy_type); #endif /* HAVE_ETHTOOL_100G_BITS */ } @@ -3067,6 +2865,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, Asym_Pause); break; } + } /** @@ -3105,15 +2904,13 @@ ice_get_link_ksettings(struct net_device *netdev, struct ice_aqc_get_phy_caps_data *caps; struct 
ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; - enum ice_status status; - int err = 0; + int err; ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); hw_link_info = &vsi->port_info->phy.link_info; - /* set speed and duplex */ if (hw_link_info->link_info & ICE_AQ_LINK_UP) ice_get_settings_link_up(ks, netdev); @@ -3125,6 +2922,7 @@ ice_get_link_ksettings(struct net_device *netdev, AUTONEG_ENABLE : AUTONEG_DISABLE; /* set media type settings */ + switch (vsi->port_info->phy.media_type) { case ICE_MEDIA_FIBER: ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); @@ -3158,12 +2956,10 @@ ice_get_link_ksettings(struct net_device *netdev, if (!caps) return -ENOMEM; - status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); - if (status) { - err = -EIO; + err = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); + if (err) goto done; - } /* Set the advertised flow control based on the PHY capability */ if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && @@ -3197,12 +2993,10 @@ ice_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); #endif /* ETHTOOL_GFECPARAM */ - status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); - if (status) { - err = -EIO; + err = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); + if (err) goto done; - } #ifdef ETHTOOL_GFECPARAM /* Set supported FEC modes based on PHY capability */ @@ -3314,6 +3108,15 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) 100000baseKR4_Full)) adv_link_speed |= ICE_AQ_LINK_SPEED_100GB; #endif /* HAVE_ETHTOOL_100G_BITS */ +#ifdef HAVE_ETHTOOL_NEW_100G_BITS + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseCR2_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseSR2_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseKR2_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_100GB; +#endif /* HAVE_ETHTOOL_NEW_100G_BITS */ return adv_link_speed; } @@ -3377,6 +3180,44 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks, return err; } +/** + * ice_set_phy_type_from_speed - set phy_types based on speeds + * and advertised modes + * @ks: ethtool link ksettings struct + * @phy_type_low: pointer to the lower part of phy_type + * @phy_type_high: pointer to the higher part of phy_type + * @adv_link_speed: targeted link speeds bitmap + */ +static void +ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks, + u64 *phy_type_low, u64 *phy_type_high, + u16 adv_link_speed) +{ + /* Handle 1000M speed in a special way because ice_update_phy_type + * enables all link modes, but having mixed copper and optic standards + * is not supported + */ + adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Full)) + *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T | + ICE_PHY_TYPE_LOW_1G_SGMII; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseKX_Full)) + *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX; +#ifdef HAVE_ETHTOOL_NEW_1G_BITS + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full)) + *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX | + ICE_PHY_TYPE_LOW_1000BASE_LX; +#endif + + 
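/* Only the 1G link modes that user space actually advertised in @ks are
 * mapped above; ice_update_phy_type() below then expands every speed bit
 * still set in adv_link_speed into its corresponding PHY types. Masking
 * ICE_AQ_LINK_SPEED_1000MB out of that expansion (see the top of this
 * function) avoids requesting copper (1000baseT/KX) and optical
 * (1000baseX) 1G standards at the same time, which is not supported.
 */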
ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed); +} + /** * ice_set_link_ksettings - Set Speed and Duplex * @netdev: network interface device structure @@ -3398,11 +3239,10 @@ ice_set_link_ksettings(struct net_device *netdev, struct ice_pf *pf = np->vsi->back; struct ice_port_info *pi; u8 autoneg_changed = 0; - enum ice_status status; u64 phy_type_high = 0; u64 phy_type_low = 0; - int err = 0; bool linkup; + int err; pi = np->vsi->port_info; @@ -3422,15 +3262,13 @@ ice_set_link_ksettings(struct net_device *netdev, /* Get the PHY capabilities based on media */ if (ice_fw_supports_report_dflt_cfg(pi->hw)) - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, - phy_caps, NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + phy_caps, NULL); else - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - phy_caps, NULL); - if (status) { - err = -EIO; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + phy_caps, NULL); + if (err) goto done; - } /* save autoneg out of ksettings */ autoneg = copy_ks.base.autoneg; @@ -3496,13 +3334,11 @@ ice_set_link_ksettings(struct net_device *netdev, /* Call to get the current link speed */ pi->phy.get_link_info = true; - status = ice_get_link_status(pi, &linkup); - if (status) { - err = -EIO; + err = ice_get_link_status(pi, &linkup); + if (err) goto done; - } - curr_link_speed = pi->phy.link_info.link_speed; + curr_link_speed = pi->phy.curr_user_speed_req; adv_link_speed = ice_ksettings_find_adv_link_speed(ks); /* If speed didn't get set, set it to what it currently is. @@ -3513,7 +3349,8 @@ ice_set_link_ksettings(struct net_device *netdev, adv_link_speed = curr_link_speed; /* Convert the advertise link speeds to their corresponded PHY_TYPE */ - ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed); + ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high, + adv_link_speed); if (!autoneg_changed && adv_link_speed == curr_link_speed) { netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); @@ -3569,10 +3406,9 @@ ice_set_link_ksettings(struct net_device *netdev, } /* make the aq call */ - status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); - if (status) { + err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); + if (err) { netdev_info(netdev, "Set phy config failed,\n"); - err = -EIO; goto done; } @@ -3667,10 +3503,36 @@ ice_get_legacy_settings_link_up(struct ethtool_cmd *ecmd, ecmd->supported = SUPPORTED_40000baseKR4_Full; ecmd->advertising = ADVERTISED_40000baseKR4_Full; break; + case ICE_PHY_TYPE_LOW_25GBASE_T: + case ICE_PHY_TYPE_LOW_25GBASE_CR: + case ICE_PHY_TYPE_LOW_25GBASE_CR_S: + case ICE_PHY_TYPE_LOW_25GBASE_CR1: + case ICE_PHY_TYPE_LOW_25GBASE_SR: + case ICE_PHY_TYPE_LOW_25GBASE_LR: + case ICE_PHY_TYPE_LOW_25GBASE_KR: + case ICE_PHY_TYPE_LOW_25GBASE_KR_S: + case ICE_PHY_TYPE_LOW_25GBASE_KR1: + case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + netdev_warn(netdev, "25G PHY type detected but this can't be reported to ethtool as your kernel is too old\n"); + break; + case ICE_PHY_TYPE_LOW_100GBASE_CR4: + case ICE_PHY_TYPE_LOW_100GBASE_SR4: + case ICE_PHY_TYPE_LOW_100GBASE_LR4: + case ICE_PHY_TYPE_LOW_100GBASE_KR4: + case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: + case ICE_PHY_TYPE_LOW_100G_CAUI4: + case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: + case ICE_PHY_TYPE_LOW_100G_AUI4: + case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: + case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: + case ICE_PHY_TYPE_LOW_100GBASE_CP2: + case ICE_PHY_TYPE_LOW_100GBASE_SR2: + case 
ICE_PHY_TYPE_LOW_100GBASE_DR: + netdev_warn(netdev, "100G PHY type detected but this can't be reported to ethtool as your kernel is too old\n"); + break; default: /* if we got here and link is up something bad is afoot */ - netdev_info(netdev, "WARNING: Link up but PhyType isn't recognized.\n"); - netdev_info(netdev, "WARNING: Unrecognized PHY_Low (0x%llx).\n", + netdev_info(netdev, "WARNING: Link is up but detected unrecognized phy_type_low 0x%llx\n", (u64)phy_types_low); } @@ -3691,9 +3553,15 @@ ice_get_legacy_settings_link_up(struct ethtool_cmd *ecmd, /* Set speed and duplex */ switch (hw_link_info->link_speed) { + case ICE_AQ_LINK_SPEED_100GB: + ethtool_cmd_speed_set(ecmd, SPEED_100000); + break; case ICE_AQ_LINK_SPEED_40GB: ethtool_cmd_speed_set(ecmd, SPEED_40000); break; + case ICE_AQ_LINK_SPEED_25GB: + ethtool_cmd_speed_set(ecmd, SPEED_25000); + break; case ICE_AQ_LINK_SPEED_10GB: ethtool_cmd_speed_set(ecmd, SPEED_10000); break; @@ -3751,13 +3619,12 @@ static int ice_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) struct ice_aqc_get_phy_caps_data *caps; struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; - enum ice_status status; bool link_up; + int status; hw_link_info = &vsi->port_info->phy.link_info; link_up = hw_link_info->link_info & ICE_AQ_LINK_UP; - /* set speed and duplex */ if (link_up) ice_get_legacy_settings_link_up(ecmd, netdev); @@ -3801,8 +3668,8 @@ static int ice_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) status = ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); if (status) { - dev_dbg(ice_pf_to_dev(vsi->back), "get PHY caps failed, status %s\n", - ice_stat_str(status)); + dev_dbg(ice_pf_to_dev(vsi->back), "get PHY caps failed, status %d\n", + status); kfree(caps); return -EIO; } @@ -3941,12 +3808,11 @@ static int ice_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) struct ethtool_cmd safe_ecmd; struct ice_port_info *p; u8 autoneg_changed = 0; - enum ice_status status; u64 phy_type_high = 0; u64 phy_type_low = 0; u32 advertise; - int err = 0; bool linkup; + int err; p = np->vsi->port_info; @@ -4000,12 +3866,10 @@ static int ice_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) return -ENOMEM; /* Get the current PHY config */ - status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - abilities, NULL); - if (status) { - err = -EAGAIN; + err = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + abilities, NULL); + if (err) goto done; - } /* Copy the current user PHY configuration. 
The current user PHY * configuration is initialized during probe from PHY capabilities @@ -4028,11 +3892,9 @@ static int ice_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) /* Call to get the current link speed */ p->phy.get_link_info = true; - status = ice_get_link_status(p, &linkup); - if (status) { - err = -EAGAIN; + err = ice_get_link_status(p, &linkup); + if (err) goto done; - } curr_link_speed = p->phy.link_info.link_speed; adv_link_speed = ice_legacy_find_adv_link_speed(advertise); @@ -4079,10 +3941,9 @@ static int ice_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) } /* make the AQ call */ - status = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL); - if (status) { + err = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL); + if (err) { netdev_info(netdev, "Set phy config failed,\n"); - err = -EAGAIN; goto done; } @@ -4222,9 +4083,9 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) { struct ice_pf *pf = vsi->back; struct ice_rss_hash_cfg cfg; - enum ice_status status; struct device *dev; u64 hashed_flds; + int status; u32 hdrs; dev = ice_pf_to_dev(pf); @@ -4254,8 +4115,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) cfg.symm = false; status = ice_add_rss_cfg(&pf->hw, vsi->idx, &cfg); if (status) { - dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n", + vsi->vsi_num, status); return -EINVAL; } @@ -4389,11 +4250,16 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, return ret; } +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS static void ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, - struct kernel_ethtool_ringparam *kernel_ring, - struct netlink_ext_ack *extack) + struct kernel_ethtool_ringparam __always_unused *kernel_rp, + struct netlink_ext_ack __always_unused *extack) +#else /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */ +static void +ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */ { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -4410,11 +4276,16 @@ ice_get_ringparam(struct net_device *netdev, ring->rx_jumbo_pending = 0; } +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS static int ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, - struct kernel_ethtool_ringparam *kernel_ring, - struct netlink_ext_ack *extack) + struct kernel_ethtool_ringparam __always_unused *kernel_rp, + struct netlink_ext_ack __always_unused *extack) +#else /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */ +static int +ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */ { struct ice_ring *tx_rings = NULL, *rx_rings = NULL; struct ice_netdev_priv *np = netdev_priv(netdev); @@ -4505,6 +4376,7 @@ ice_set_ringparam(struct net_device *netdev, tx_rings[i].count = new_tx_cnt; tx_rings[i].desc = NULL; tx_rings[i].tx_buf = NULL; + tx_rings[i].tx_tstamps = &pf->ptp.port.tx; err = ice_setup_tx_ring(&tx_rings[i]); if (err) { while (i--) @@ -4563,6 +4435,7 @@ process_rx: /* clone ring and setup updated count */ rx_rings[i] = *vsi->rx_rings[i]; rx_rings[i].count = new_rx_cnt; + rx_rings[i].cached_phctime = pf->ptp.cached_phc_time; rx_rings[i].desc = NULL; rx_rings[i].rx_buf = NULL; /* this is to allow wr32 to have something to write to @@ -4669,7 +4542,7 @@ ice_get_pauseparam(struct net_device 
*netdev, struct ethtool_pauseparam *pause) struct ice_port_info *pi = np->vsi->port_info; struct ice_aqc_get_phy_caps_data *pcaps; struct ice_dcbx_cfg *dcbx_cfg; - enum ice_status status; + int status; /* Initialize pause params */ pause->rx_pause = 0; @@ -4770,11 +4643,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) struct ice_vsi *vsi = np->vsi; struct ice_hw *hw = &pf->hw; struct ice_port_info *pi; - enum ice_status status; u8 aq_failures; bool link_up; - int err = 0; u32 is_an; + int err; pi = vsi->port_info; hw_link_info = &pi->phy.link_info; @@ -4801,11 +4673,11 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, - NULL); - if (status) { + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, + NULL); + if (err) { kfree(pcaps); - return -EIO; + return err; } is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE : @@ -4847,22 +4719,19 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -EINVAL; /* Set the FC mode and only restart AN if link is up */ - status = ice_set_fc(pi, &aq_failures, link_up); + err = ice_set_fc(pi, &aq_failures, link_up); if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) { - netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) { - netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } @@ -5000,8 +4869,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key) #ifdef NETIF_F_HW_TC if (ice_is_adq_active(pf)) { - netdev_err(netdev, - "Cannot change RSS params with ADQ configured.\n"); + netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n"); return -EOPNOTSUPP; } @@ -5078,7 +4946,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) */ static int ice_get_max_txq(struct ice_pf *pf) { - return min3(pf->num_lan_msix, (u16)num_online_cpus(), + return min3(pf->num_lan_msix, pf->max_qps, (u16)pf->hw.func_caps.common_cap.num_txq); } @@ -5088,7 +4956,7 @@ static int ice_get_max_txq(struct ice_pf *pf) */ static int ice_get_max_rxq(struct ice_pf *pf) { - return min3(pf->num_lan_msix, (u16)num_online_cpus(), + return min3(pf->num_lan_msix, pf->max_qps, (u16)pf->hw.func_caps.common_cap.num_rxq); } @@ -5108,7 +4976,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi) struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; if (q_vector->rx.ring && q_vector->tx.ring) - combined++; + combined += q_vector->num_ring_rx; } return combined; @@ -5135,7 +5003,7 @@ 
ice_get_channels(struct net_device *dev, struct ethtool_channels *ch) ch->combined_count = ice_get_combined_cnt(vsi); ch->rx_count = vsi->num_rxq - ch->combined_count; ch->tx_count = vsi->num_txq - ch->combined_count; -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS if (test_bit(ICE_FLAG_MACVLAN_ENA, pf->flags)) { /* L2 forwarding devices are single queue so we infer one @@ -5144,67 +5012,13 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch) ch->max_combined += pf->max_num_macvlan; ch->combined_count += pf->num_macvlan; } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ /* report other queues */ ch->other_count = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; ch->max_other = ch->other_count; } -/** - * ice_get_valid_rss_size - return valid number of RSS queues - * @hw: pointer to the HW structure - * @new_size: requested RSS queues - */ -static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size) -{ - struct ice_hw_common_caps *caps = &hw->func_caps.common_cap; - - return min_t(int, new_size, BIT(caps->rss_table_entry_width)); -} - -/** - * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size - * @vsi: VSI to reconfigure RSS LUT on - * @req_rss_size: requested range of queue numbers for hashing - * - * Set the VSI's RSS parameters, configure the RSS LUT based on these. - */ -static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size) -{ - struct ice_pf *pf = vsi->back; - struct device *dev; - struct ice_hw *hw; - int err; - u8 *lut; - - dev = ice_pf_to_dev(pf); - hw = &pf->hw; - - if (!req_rss_size) - return -EINVAL; - - lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); - if (!lut) - return -ENOMEM; - - /* set RSS LUT parameters */ - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - vsi->rss_size = 1; - else - vsi->rss_size = ice_get_valid_rss_size(hw, req_rss_size); - - /* create/set RSS LUT */ - ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); - err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); - if (err) - dev_err(dev, "Cannot set RSS lut, err %d aq_err %s\n", err, - ice_aq_str(hw->adminq.sq_last_status)); - - kfree(lut); - return err; -} - /** * ice_set_channels - set the number channels * @dev: network interface device structure @@ -5217,6 +5031,7 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) struct ice_pf *pf = vsi->back; int new_rx = 0, new_tx = 0; u32 curr_combined; + int err; /* do not support changing channels in Safe Mode */ if (ice_is_safe_mode(pf)) { @@ -5233,12 +5048,12 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) return -EOPNOTSUPP; } #endif /* NETIF_F_HW_TC */ -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS if (test_bit(ICE_FLAG_MACVLAN_ENA, pf->flags)) { netdev_err(dev, "Cannot set channels when L2 forwarding enabled\n"); return -EOPNOTSUPP; } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) { netdev_err(dev, "Cannot set channels when Flow Director filters are active\n"); @@ -5267,6 +5082,16 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) new_rx = ch->combined_count + ch->rx_count; new_tx = ch->combined_count + ch->tx_count; + if (new_rx < vsi->tc_cfg.numtc) { + netdev_err(dev, "Cannot set less Rx channels, than Traffic Classes you have (%u)\n", + vsi->tc_cfg.numtc); + return -EINVAL; + } + if (new_tx < vsi->tc_cfg.numtc) { + netdev_err(dev, "Cannot set less Tx channels, than Traffic 
Classes you have (%u)\n", + vsi->tc_cfg.numtc); + return -EINVAL; + } if (new_rx > ice_get_max_rxq(pf)) { netdev_err(dev, "Maximum allowed Rx channels is %d\n", ice_get_max_rxq(pf)); @@ -5278,16 +5103,19 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) return -EINVAL; } - ice_vsi_recfg_qs(vsi, new_rx, new_tx); + err = ice_vsi_recfg_qs(vsi, new_rx, new_tx); + if (err) + goto channels_out; #ifdef IFF_RXFH_CONFIGURED - if (!netif_is_rxfh_configured(dev)) - return ice_vsi_set_dflt_rss_lut(vsi, new_rx); + err = netif_is_rxfh_configured(dev); + if (!err) { + err = ice_vsi_set_dflt_rss_lut(vsi, new_rx); + goto channels_out; + } /* Update rss_size due to change in Rx queues */ vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx); - - return 0; #else /* Clear the previous vsi->rss_lut_user because it is assumed to * be invalid at this point. @@ -5298,8 +5126,12 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) vsi->rss_lut_user = NULL; } - return ice_vsi_set_dflt_rss_lut(vsi, new_rx); + err = ice_vsi_set_dflt_rss_lut(vsi, new_rx); #endif /* IFF_RXFH_CONFIGURED */ +channels_out: + set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags); + set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); + return err; } /** @@ -5456,10 +5288,15 @@ __ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, return 0; } +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK static int ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, - struct kernel_ethtool_coalesce *kernel_coal, - struct netlink_ext_ack *extack) + struct kernel_ethtool_coalesce __maybe_unused *kec, + struct netlink_ext_ack __maybe_unused *extack) +#else +static int +ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +#endif /* HAVE_ETHTOOL_COALESCE_EXTACK */ { return __ice_get_coalesce(netdev, ec, -1); } @@ -5514,11 +5351,8 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, c_type_str); return -EINVAL; } - if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) { + if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high; - ice_write_intrl(rc->ring->q_vector, - ec->rx_coalesce_usecs_high); - } use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; coalesce_usecs = ec->rx_coalesce_usecs; @@ -5700,6 +5534,8 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, if (ice_set_q_coalesce(vsi, ec, v_idx)) return -EINVAL; + + ice_set_q_vector_intrl(vsi->q_vectors[v_idx]); } goto set_complete; } @@ -5707,14 +5543,21 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, if (ice_set_q_coalesce(vsi, ec, q_num)) return -EINVAL; + ice_set_q_vector_intrl(vsi->q_vectors[q_num]); + set_complete: return 0; } +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK static int ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, - struct kernel_ethtool_coalesce *kernel_coal, - struct netlink_ext_ack *extack) + struct kernel_ethtool_coalesce __maybe_unused *kec, + struct netlink_ext_ack __maybe_unused *extack) +#else +static int +ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +#endif /* HAVE_ETHTOOL_COALESCE_EXTACK */ { return __ice_set_coalesce(netdev, ec, -1); } @@ -5757,18 +5600,24 @@ ice_repr_is_coalesce_param_invalid(struct ethtool_coalesce *ec) } #endif /* !ETHTOOL_COALESCE_USECS */ +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK /** * ice_repr_set_coalesce - set coalesce settings for all queues * @netdev: pointer to 
the netdev associated with this query * @ec: ethtool structure to read the requested coalesce settings + * @kec: kernel coalesce parameter + * @extack: kernel extack parameter * * Return 0 on success, negative otherwise. */ static int -ice_repr_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec, - struct kernel_ethtool_coalesce *kernel_coal, - struct netlink_ext_ack *extack) +ice_repr_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce __maybe_unused *kec, + struct netlink_ext_ack __maybe_unused *extack) +#else +static int +ice_repr_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +#endif /* HAVE_ETHTOOL_COALESCE_EXTACK */ { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -5798,10 +5647,13 @@ ice_repr_set_coalesce(struct net_device *netdev, return 0; } +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK /** * ice_repr_get_coalesce - get coalesce settings * @netdev: pointer to the netdev associated with this query * @ec: ethtool structure to read the requested coalesce settings + * @kec: kernel coalesce parameter + * @extack: kernel extack parameter * * Since all queues have the same Rx coalesce high settings, * read the value from te first queue. @@ -5809,10 +5661,13 @@ ice_repr_set_coalesce(struct net_device *netdev, * Return 0 on success, negative otherwise. */ static int -ice_repr_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec, - struct kernel_ethtool_coalesce *kernel_coal, - struct netlink_ext_ack *extack) +ice_repr_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce __maybe_unused *kec, + struct netlink_ext_ack __maybe_unused *extack) +#else +static int +ice_repr_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +#endif /* HAVE_ETHTOOL_COALESCE_EXTACK */ { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -5825,6 +5680,54 @@ ice_repr_get_coalesce(struct net_device *netdev, return 0; } +static void +ice_repr_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + + if (ice_check_vf_ready_for_cfg(repr->vf)) + return; + + __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi); +} + +static void +ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + + /* for port representors only ETH_SS_STATS is supported */ + if (ice_check_vf_ready_for_cfg(repr->vf) || + stringset != ETH_SS_STATS) + return; + + __ice_get_strings(netdev, stringset, data, repr->src_vsi); +} + +static void +ice_repr_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + + if (ice_check_vf_ready_for_cfg(repr->vf)) + return; + + __ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi); +} + +static int ice_repr_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ICE_VSI_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + #ifdef ETHTOOL_GMODULEINFO #define ICE_I2C_EEPROM_DEV_ADDR 0xA0 #define ICE_I2C_EEPROM_DEV_ADDR2 0xA2 @@ -5851,16 +5754,16 @@ ice_get_module_info(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 sff8472_comp = 0; u8 sff8472_swap = 0; u8 sff8636_rev = 0; u8 value = 0; + int 
status; status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00, 0, &value, 1, 0, NULL); if (status) - return -EIO; + return status; switch (value) { case ICE_MODULE_TYPE_SFP: @@ -5868,12 +5771,12 @@ ice_get_module_info(struct net_device *netdev, ICE_MODULE_SFF_8472_COMP, 0x00, 0, &sff8472_comp, 1, 0, NULL); if (status) - return -EIO; + return status; status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, ICE_MODULE_SFF_8472_SWAP, 0x00, 0, &sff8472_swap, 1, 0, NULL); if (status) - return -EIO; + return status; if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) { modinfo->type = ETH_MODULE_SFF_8079; @@ -5893,7 +5796,7 @@ ice_get_module_info(struct net_device *netdev, ICE_MODULE_REVISION_ADDR, 0x00, 0, &sff8636_rev, 1, 0, NULL); if (status) - return -EIO; + return status; /* Check revision compliance */ if (sff8636_rev > 0x02) { /* Module is SFF-8636 compliant */ @@ -5928,11 +5831,11 @@ ice_get_module_eeprom(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; bool is_sfp = false; unsigned int i, j; u16 offset = 0; u8 page = 0; + int status; if (!ee || !ee->len || !data) return -EINVAL; @@ -5940,7 +5843,7 @@ ice_get_module_eeprom(struct net_device *netdev, status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0, NULL); if (status) - return -EIO; + return status; if (value[0] == ICE_MODULE_TYPE_SFP) is_sfp = true; @@ -5964,7 +5867,7 @@ ice_get_module_eeprom(struct net_device *netdev, } } - /* Bit 2 of eeprom address 0x02 declares upper + /* Bit 2 of EEPROM address 0x02 declares upper * pages are disabled on QSFP modules. * SFP modules only ever use page 0. */ @@ -6107,7 +6010,6 @@ void ice_set_ethtool_safe_mode_ops(struct net_device *netdev) netdev->ethtool_ops = &ice_ethtool_safe_mode_ops; } - static const struct ethtool_ops ice_ethtool_repr_ops = { #ifdef ETHTOOL_COALESCE_USECS .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_HIGH, @@ -6116,9 +6018,9 @@ static const struct ethtool_ops ice_ethtool_repr_ops = { .set_coalesce = ice_repr_set_coalesce, .get_drvinfo = ice_repr_get_drvinfo, .get_link = ethtool_op_get_link, - .get_strings = ice_get_strings, - .get_ethtool_stats = ice_get_ethtool_stats, - .get_sset_count = ice_get_sset_count, + .get_strings = ice_repr_get_strings, + .get_ethtool_stats = ice_repr_get_ethtool_stats, + .get_sset_count = ice_repr_get_sset_count, }; /** diff --git a/drivers/thirdparty/ice/ice_ethtool.h b/drivers/thirdparty/ice/ice_ethtool.h index 3b167694aa18..ad238a928f13 100644 --- a/drivers/thirdparty/ice/ice_ethtool.h +++ b/drivers/thirdparty/ice/ice_ethtool.h @@ -136,7 +136,7 @@ struct ice_stats { #define ICE_RXQ_NAPI_POLL PICK("rx_queue_%u_napi_poll_cnt", "rx_q-%u_napi_poll_count") #endif /* ICE_ADD_PROBES */ -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS #ifdef ICE_ADD_PROBES /* macvlan stats */ #define L2_FWD_TX_PKTS1 PICK("l2-fwd-%s-tx_pkts", "tx-l2-forward_q-%s_pkts") @@ -148,7 +148,7 @@ struct ice_stats { #define L2_FWD_RX_PKTS2 PICK("l2-fwd-%i-rx_pkts", "rx-l2-forward_q-%i_pkts") #define L2_FWD_RX_BYTES2 PICK("l2-fwd-%i-rx_bytes", "rx-l2-forward_q-%i_bytes") #endif /* ICE_ADD_PROBES */ -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #ifdef ADQ_PERF_COUNTERS /* ADQ stats */ @@ -193,4 +193,334 @@ struct ice_stats { #define ICE_RXQ_KEEP_STATE_BP_BUDGET8 PICK("rx_%u.keep_state_bp_budget8", "rx_q-%u_keep_state_bp_budget8") #define ICE_RXQ_KEEP_STATE_BP_BUDGET64 PICK("rx_%u.keep_state_bp_budget64", 
"rx_q-%u_keep_state_bp_budget64") #endif /* ADQ_PERF_COUNTERS */ + +/* PTP stats */ +#define ICE_TX_HWTSTAMP_SKIPPED "tx_hwtstamp_skipped" +#define ICE_TX_HWTSTAMP_TIMEOUTS "tx_hwtstamp_timeouts" +#define ICE_TX_HWTSTAMP_FLUSHED "tx_hwtstamp_flushed" +#define ICE_TX_HWTSTAMP_DISCARDED "tx_hwtstamp_discarded" +#define ICE_LATE_CACHED_PHC_UPDATES "late_cached_phc_updates" + +struct ice_phy_type_to_ethtool { + u64 aq_link_speed; + enum ethtool_link_mode_bit_indices link_mode; + bool ethtool_link_mode_supported; + u8 phy_type_idx; +}; + +/* Macro to make PHY type to ethtool link mode table entry. + * The index is the PHY type. + */ +#define ICE_PHY_TYPE(PHY_TYPE_IDX, LINK_SPEED, ETHTOOL_LINK_MODE) \ + { ICE_AQ_LINK_SPEED_ ## LINK_SPEED, \ + ETHTOOL_LINK_MODE_ ## ETHTOOL_LINK_MODE ## _BIT, \ + true, \ + PHY_TYPE_IDX } + +/* PHY types that do not have a supported ethtool link mode are initialized as: + * { false, PHY_TYPE_IDX, ICE_AQ_LINK_SPEED_UNKNOWN , 0 } + */ +#define ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(PHY_TYPE_IDX) \ + { ICE_AQ_LINK_SPEED_UNKNOWN, \ + (enum ethtool_link_mode_bit_indices)0, \ + false, \ + PHY_TYPE_IDX } + +#define ICE_PHY_TYPE_LOW_SIZE (ICE_PHY_TYPE_LOW_MAX_INDEX + 1) + +/* Lookup table mapping PHY type low to link speed and ethtool link modes */ +static +struct ice_phy_type_to_ethtool phy_type_low_lkup[ICE_PHY_TYPE_LOW_SIZE] = { + /* ICE_PHY_TYPE_LOW_100BASE_TX */ + ICE_PHY_TYPE(0, 100MB, 100baseT_Full), + /* ICE_PHY_TYPE_LOW_100M_SGMII */ + ICE_PHY_TYPE(1, 100MB, 100baseT_Full), + /* ICE_PHY_TYPE_LOW_1000BASE_T */ + ICE_PHY_TYPE(2, 1000MB, 1000baseT_Full), +#ifdef HAVE_ETHTOOL_NEW_1G_BITS + /* ICE_PHY_TYPE_LOW_1000BASE_SX */ + ICE_PHY_TYPE(3, 1000MB, 1000baseX_Full), + /* ICE_PHY_TYPE_LOW_1000BASE_LX */ + ICE_PHY_TYPE(4, 1000MB, 1000baseX_Full), +#else + /* ICE_PHY_TYPE_LOW_1000BASE_SX */ + ICE_PHY_TYPE(3, 1000MB, 1000baseT_Full), + /* ICE_PHY_TYPE_LOW_1000BASE_LX */ + ICE_PHY_TYPE(4, 1000MB, 1000baseT_Full), +#endif /* HAVE_ETHTOOL_NEW_1G_BITS */ + /* ICE_PHY_TYPE_LOW_1000BASE_KX */ + ICE_PHY_TYPE(5, 1000MB, 1000baseKX_Full), + /* ICE_PHY_TYPE_LOW_1G_SGMII */ + ICE_PHY_TYPE(6, 1000MB, 1000baseT_Full), +#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS + /* ICE_PHY_TYPE_LOW_2500BASE_T */ + ICE_PHY_TYPE(7, 2500MB, 2500baseT_Full), +#else + /* ICE_PHY_TYPE_LOW_2500BASE_T */ + ICE_PHY_TYPE(7, 2500MB, 2500baseX_Full), +#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */ + /* ICE_PHY_TYPE_LOW_2500BASE_X */ + ICE_PHY_TYPE(8, 2500MB, 2500baseX_Full), + /* ICE_PHY_TYPE_LOW_2500BASE_KX */ + ICE_PHY_TYPE(9, 2500MB, 2500baseX_Full), +#ifdef HAVE_ETHTOOL_5G_BITS + /* ICE_PHY_TYPE_LOW_5GBASE_T */ + ICE_PHY_TYPE(10, 5GB, 5000baseT_Full), + /* ICE_PHY_TYPE_LOW_5GBASE_KR */ + ICE_PHY_TYPE(11, 5GB, 5000baseT_Full), +#else /* HAVE_ETHTOOL_5G_BITS */ + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(10), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(11), +#endif /* HAVE_ETHTOOL_5G_BITS */ + /* ICE_PHY_TYPE_LOW_10GBASE_T */ + ICE_PHY_TYPE(12, 10GB, 10000baseT_Full), +#ifdef HAVE_ETHTOOL_NEW_10G_BITS + /* ICE_PHY_TYPE_LOW_10G_SFI_DA */ + ICE_PHY_TYPE(13, 10GB, 10000baseCR_Full), + /* ICE_PHY_TYPE_LOW_10GBASE_SR */ + ICE_PHY_TYPE(14, 10GB, 10000baseSR_Full), + /* ICE_PHY_TYPE_LOW_10GBASE_LR */ + ICE_PHY_TYPE(15, 10GB, 10000baseLR_Full), +#else + /* ICE_PHY_TYPE_LOW_10G_SFI_DA */ + ICE_PHY_TYPE(13, 10GB, 10000baseT_Full), + /* ICE_PHY_TYPE_LOW_10GBASE_SR */ + ICE_PHY_TYPE(14, 10GB, 10000baseT_Full), + /* ICE_PHY_TYPE_LOW_10GBASE_LR */ + ICE_PHY_TYPE(15, 10GB, 10000baseT_Full), +#endif /* HAVE_ETHTOOL_NEW_10G_BITS */ + /* 
ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 */ + ICE_PHY_TYPE(16, 10GB, 10000baseKR_Full), +#ifdef HAVE_ETHTOOL_NEW_10G_BITS + /* ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC */ + ICE_PHY_TYPE(17, 10GB, 10000baseCR_Full), +#else + /* ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC */ + ICE_PHY_TYPE(17, 10GB, 10000baseT_Full), +#endif + /* ICE_PHY_TYPE_LOW_10G_SFI_C2C */ + ICE_PHY_TYPE(18, 10GB, 10000baseKR_Full), +#ifdef HAVE_ETHTOOL_25G_BITS + /* ICE_PHY_TYPE_LOW_25GBASE_T */ + ICE_PHY_TYPE(19, 25GB, 25000baseCR_Full), + /* ICE_PHY_TYPE_LOW_25GBASE_CR */ + ICE_PHY_TYPE(20, 25GB, 25000baseCR_Full), + /* ICE_PHY_TYPE_LOW_25GBASE_CR_S */ + ICE_PHY_TYPE(21, 25GB, 25000baseCR_Full), + /* ICE_PHY_TYPE_LOW_25GBASE_CR1 */ + ICE_PHY_TYPE(22, 25GB, 25000baseCR_Full), + /* ICE_PHY_TYPE_LOW_25GBASE_SR */ + ICE_PHY_TYPE(23, 25GB, 25000baseSR_Full), + /* ICE_PHY_TYPE_LOW_25GBASE_LR */ + ICE_PHY_TYPE(24, 25GB, 25000baseSR_Full), + /* ICE_PHY_TYPE_LOW_25GBASE_KR */ + ICE_PHY_TYPE(25, 25GB, 25000baseKR_Full), + /* ICE_PHY_TYPE_LOW_25GBASE_KR_S */ + ICE_PHY_TYPE(26, 25GB, 25000baseKR_Full), + /* ICE_PHY_TYPE_LOW_25GBASE_KR1 */ + ICE_PHY_TYPE(27, 25GB, 25000baseKR_Full), + /* ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC */ + ICE_PHY_TYPE(28, 25GB, 25000baseSR_Full), + /* ICE_PHY_TYPE_LOW_25G_AUI_C2C */ + ICE_PHY_TYPE(29, 25GB, 25000baseCR_Full), +#else /* HAVE_ETHTOOL_25G_BITS */ + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(19), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(20), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(21), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(22), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(23), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(24), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(25), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(26), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(27), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(28), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(29), +#endif /* HAVE_ETHTOOL_25G_BITS */ + /* ICE_PHY_TYPE_LOW_40GBASE_CR4 */ + ICE_PHY_TYPE(30, 40GB, 40000baseCR4_Full), + /* ICE_PHY_TYPE_LOW_40GBASE_SR4 */ + ICE_PHY_TYPE(31, 40GB, 40000baseSR4_Full), + /* ICE_PHY_TYPE_LOW_40GBASE_LR4 */ + ICE_PHY_TYPE(32, 40GB, 40000baseLR4_Full), + /* ICE_PHY_TYPE_LOW_40GBASE_KR4 */ + ICE_PHY_TYPE(33, 40GB, 40000baseKR4_Full), + /* ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC */ + ICE_PHY_TYPE(34, 40GB, 40000baseSR4_Full), + /* ICE_PHY_TYPE_LOW_40G_XLAUI */ + ICE_PHY_TYPE(35, 40GB, 40000baseCR4_Full), +#ifdef HAVE_ETHTOOL_50G_BITS + /* ICE_PHY_TYPE_LOW_50GBASE_CR2 */ + ICE_PHY_TYPE(36, 50GB, 50000baseCR2_Full), +#else + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(36), +#endif /* HAVE_ETHTOOL_50G_BITS */ +#ifdef HAVE_ETHTOOL_NEW_50G_BITS + /* ICE_PHY_TYPE_LOW_50GBASE_SR2 */ + ICE_PHY_TYPE(37, 50GB, 50000baseSR2_Full), + /* ICE_PHY_TYPE_LOW_50GBASE_LR2 */ + ICE_PHY_TYPE(38, 50GB, 50000baseSR2_Full), +#else + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(37), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(38), +#endif /* HAVE_ETHTOOL_NEW_50G_BITS */ +#ifdef HAVE_ETHTOOL_50G_BITS + /* ICE_PHY_TYPE_LOW_50GBASE_KR2 */ + ICE_PHY_TYPE(39, 50GB, 50000baseKR2_Full), +#ifdef HAVE_ETHTOOL_NEW_50G_BITS + /* ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC */ + ICE_PHY_TYPE(40, 50GB, 50000baseSR2_Full), +#else + /* ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC */ + ICE_PHY_TYPE(40, 50GB, 50000baseCR2_Full), +#endif /* HAVE_ETHTOOL_NEW_50G_BITS */ + /* ICE_PHY_TYPE_LOW_50G_LAUI2 */ + ICE_PHY_TYPE(41, 50GB, 50000baseCR2_Full), +#ifdef HAVE_ETHTOOL_NEW_50G_BITS + /* ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC */ + ICE_PHY_TYPE(42, 50GB, 50000baseSR2_Full), +#else + /* ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC */ + ICE_PHY_TYPE(42, 50GB, 50000baseCR2_Full), +#endif + /* ICE_PHY_TYPE_LOW_50G_AUI2 */ + ICE_PHY_TYPE(43, 
50GB, 50000baseCR2_Full), +#ifdef HAVE_ETHTOOL_200G_BITS + /* ICE_PHY_TYPE_LOW_50GBASE_CP */ + ICE_PHY_TYPE(44, 50GB, 50000baseCR_Full), + /* ICE_PHY_TYPE_LOW_50GBASE_SR */ + ICE_PHY_TYPE(45, 50GB, 50000baseSR_Full), +#else + /* ICE_PHY_TYPE_LOW_50GBASE_CP */ + ICE_PHY_TYPE(44, 50GB, 50000baseCR2_Full), + /* ICE_PHY_TYPE_LOW_50GBASE_SR */ + ICE_PHY_TYPE(45, 50GB, 50000baseCR2_Full), +#endif /* HAVE_ETHTOOL_200G_BITS */ +#else /* HAVE_ETHTOOL_50G_BITS */ + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(39), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(40), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(41), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(42), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(43), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(44), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(45), +#endif /* HAVE_ETHTOOL_50G_BITS */ +#ifdef HAVE_ETHTOOL_NEW_50G_BITS +#ifdef HAVE_ETHTOOL_200G_BITS + /* ICE_PHY_TYPE_LOW_50GBASE_FR */ + ICE_PHY_TYPE(46, 50GB, 50000baseLR_ER_FR_Full), + /* ICE_PHY_TYPE_LOW_50GBASE_LR */ + ICE_PHY_TYPE(47, 50GB, 50000baseLR_ER_FR_Full), +#else + /* ICE_PHY_TYPE_LOW_50GBASE_FR */ + ICE_PHY_TYPE(46, 50GB, 50000baseSR2_Full), + /* ICE_PHY_TYPE_LOW_50GBASE_LR */ + ICE_PHY_TYPE(47, 50GB, 50000baseSR2_Full), +#endif /* HAVE_ETHTOOL_200G_BITS */ +#else + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(46), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(47), +#endif /* HAVE_ETHTOOL_NEW_50G_BITS */ +#ifdef HAVE_ETHTOOL_50G_BITS +#ifdef HAVE_ETHTOOL_200G_BITS + /* ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 */ + ICE_PHY_TYPE(48, 50GB, 50000baseKR_Full), + /* ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC */ + ICE_PHY_TYPE(49, 50GB, 50000baseSR_Full), + /* ICE_PHY_TYPE_LOW_50G_AUI1 */ + ICE_PHY_TYPE(50, 50GB, 50000baseCR_Full), +#else + /* ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 */ + ICE_PHY_TYPE(48, 50GB, 50000baseKR2_Full), + /* ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC */ + ICE_PHY_TYPE(49, 50GB, 50000baseCR2_Full), + /* ICE_PHY_TYPE_LOW_50G_AUI1 */ + ICE_PHY_TYPE(50, 50GB, 50000baseCR2_Full), +#endif /* HAVE_ETHTOOL_200G_BITS */ +#else + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(48), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(49), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(50), +#endif /* HAVE_ETHTOOL_50G_BITS */ +#ifdef HAVE_ETHTOOL_100G_BITS + /* ICE_PHY_TYPE_LOW_100GBASE_CR4 */ + ICE_PHY_TYPE(51, 100GB, 100000baseCR4_Full), + /* ICE_PHY_TYPE_LOW_100GBASE_SR4 */ + ICE_PHY_TYPE(52, 100GB, 100000baseSR4_Full), + /* ICE_PHY_TYPE_LOW_100GBASE_LR4 */ + ICE_PHY_TYPE(53, 100GB, 100000baseLR4_ER4_Full), + /* ICE_PHY_TYPE_LOW_100GBASE_KR4 */ + ICE_PHY_TYPE(54, 100GB, 100000baseKR4_Full), + /* ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC */ + ICE_PHY_TYPE(55, 100GB, 100000baseCR4_Full), + /* ICE_PHY_TYPE_LOW_100G_CAUI4 */ + ICE_PHY_TYPE(56, 100GB, 100000baseCR4_Full), + /* ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC */ + ICE_PHY_TYPE(57, 100GB, 100000baseSR4_Full), + /* ICE_PHY_TYPE_LOW_100G_AUI4 */ + ICE_PHY_TYPE(58, 100GB, 100000baseCR4_Full), + /* ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 */ + ICE_PHY_TYPE(59, 100GB, 100000baseCR4_Full), + /* ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 */ + ICE_PHY_TYPE(60, 100GB, 100000baseKR4_Full), +#ifdef HAVE_ETHTOOL_NEW_100G_BITS + /* ICE_PHY_TYPE_LOW_100GBASE_CP2 */ + ICE_PHY_TYPE(61, 100GB, 100000baseCR2_Full), + /* ICE_PHY_TYPE_LOW_100GBASE_SR2 */ + ICE_PHY_TYPE(62, 100GB, 100000baseSR2_Full), +#else + /* ICE_PHY_TYPE_LOW_100GBASE_CP2 */ + ICE_PHY_TYPE(61, 100GB, 100000baseCR4_Full), + /* ICE_PHY_TYPE_LOW_100GBASE_SR2 */ + ICE_PHY_TYPE(62, 100GB, 100000baseSR4_Full), +#endif /* HAVE_ETHTOOL_NEW_100G_BITS */ + /* ICE_PHY_TYPE_LOW_100GBASE_DR */ + ICE_PHY_TYPE(63, 100GB, 100000baseLR4_ER4_Full), +#else /* 
HAVE_ETHTOOL_100G_BITS */ + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(51), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(52), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(53), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(54), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(55), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(56), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(57), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(58), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(59), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(60), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(61), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(62), + ICE_PHY_TYPE_ETHTOOL_UNSUPPORTED(63), +#endif /* HAVE_ETHTOOL_100G_BITS */ +}; + +#ifdef HAVE_ETHTOOL_100G_BITS +#define ICE_PHY_TYPE_HIGH_SIZE (ICE_PHY_TYPE_HIGH_MAX_INDEX + 1) + +/* Lookup table mapping PHY type high to link speed and ethtool link modes */ +static +struct ice_phy_type_to_ethtool phy_type_high_lkup[ICE_PHY_TYPE_HIGH_SIZE] = { +#ifdef HAVE_ETHTOOL_NEW_100G_BITS + /* ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 */ + ICE_PHY_TYPE(0, 100GB, 100000baseKR2_Full), + /* ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC */ + ICE_PHY_TYPE(1, 100GB, 100000baseSR2_Full), + /* ICE_PHY_TYPE_HIGH_100G_CAUI2 */ + ICE_PHY_TYPE(2, 100GB, 100000baseCR2_Full), + /* ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC */ + ICE_PHY_TYPE(3, 100GB, 100000baseSR2_Full), + /* ICE_PHY_TYPE_HIGH_100G_AUI2 */ + ICE_PHY_TYPE(4, 100GB, 100000baseCR2_Full), +#else + /* ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 */ + ICE_PHY_TYPE(0, 100GB, 100000baseKR4_Full), + /* ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC */ + ICE_PHY_TYPE(1, 100GB, 100000baseCR4_Full), + /* ICE_PHY_TYPE_HIGH_100G_CAUI2 */ + ICE_PHY_TYPE(2, 100GB, 100000baseCR4_Full), + /* ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC */ + ICE_PHY_TYPE(3, 100GB, 100000baseCR4_Full), + /* ICE_PHY_TYPE_HIGH_100G_AUI2 */ + ICE_PHY_TYPE(4, 100GB, 100000baseCR4_Full), +#endif /* HAVE_ETHTOOL_NEW_100G_BITS */ +}; +#endif /* HAVE_ETHTOOL_100G_BITS */ #endif /* !_ICE_ETHTOOL_H_ */ diff --git a/drivers/thirdparty/ice/ice_ethtool_fdir.c b/drivers/thirdparty/ice/ice_ethtool_fdir.c index 87d09c332eac..47755736ac55 100644 --- a/drivers/thirdparty/ice/ice_ethtool_fdir.c +++ b/drivers/thirdparty/ice/ice_ethtool_fdir.c @@ -216,7 +216,6 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd) else fsp->ring_cookie = rule->orig_q_index; - idx = ice_ethtool_flow_to_fltr(fsp->flow_type); if (idx == ICE_FLTR_PTYPE_NONF_NONE) { dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n", @@ -308,13 +307,13 @@ ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx) } /** - * ice_fdir_rem_adq_chnl - remove a ADQ channel from HW filter rules + * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules * @hw: hardware structure containing filter list * @vsi_idx: VSI handle */ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx) { - enum ice_status status; + int status; int flow; if (!hw->fdir_prof) @@ -324,7 +323,7 @@ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx) struct ice_fd_hw_prof *prof = hw->fdir_prof[flow]; int tun, i; - if (!prof) + if (!prof || !prof->cnt) continue; for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { @@ -349,8 +348,7 @@ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx) */ status = ice_flow_rem_vsi_prof(hw, ICE_BLK_FD, vsi_idx, prof_id); if (status) { - dev_err(ice_hw_to_dev(hw), - "ice_flow_rem_vsi_prof() failed status=%d\n", + dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n", status); } } @@ -662,7 +660,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, struct ice_flow_prof *prof = 
NULL; struct ice_fd_hw_prof *hw_prof; struct ice_hw *hw = &pf->hw; - enum ice_status status; u64 entry1_h = 0; u64 entry2_h = 0; #ifdef NETIF_F_HW_TC @@ -718,24 +715,22 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, * actions (NULL) and zero actions 0. */ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - TNL_SEG_CNT(tun), NULL, 0, &prof); - if (status) - return ice_status_to_errno(status); - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, - main_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, NULL, 0, &entry1_h); - if (status) { - err = ice_status_to_errno(status); + err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, + TNL_SEG_CNT(tun), NULL, 0, &prof); + if (err) + return err; + + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + main_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, NULL, + 0, &entry1_h); + if (err) goto err_prof; - } - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, - ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, NULL, 0, &entry2_h); - if (status) { - err = ice_status_to_errno(status); + + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, NULL, + 0, &entry2_h); + if (err) goto err_entry; - } hw_prof->fdir_seg[tun] = seg; hw_prof->entry_h[0][tun] = entry1_h; @@ -755,11 +750,10 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, entry1_h = 0; vsi_h = main_vsi->tc_map_vsi[idx]->idx; - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, - main_vsi->idx, vsi_h, - ICE_FLOW_PRIO_NORMAL, seg, NULL, 0, - &entry1_h); - if (status) { + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + vsi_h, ICE_FLOW_PRIO_NORMAL, seg, NULL, + 0, &entry1_h); + if (err) { dev_err(dev, "Could not add Channel VSI %d to flow group\n", idx); goto err_unroll; @@ -906,7 +900,7 @@ ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow) if (!seg) return -ENOMEM; - tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX, + tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX, GFP_KERNEL); if (!tun_seg) { devm_kfree(dev, seg); @@ -1340,7 +1334,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp, if (!seg) return -ENOMEM; - tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX, + tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX, GFP_KERNEL); if (!tun_seg) { devm_kfree(dev, seg); @@ -1489,7 +1483,6 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, struct ice_hw *hw = &pf->hw; struct ice_fltr_desc desc; struct ice_vsi *ctrl_vsi; - enum ice_status status; u8 *pkt, *frag_pkt; bool has_frag; int err; @@ -1508,11 +1501,10 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, } ice_fdir_get_prgm_desc(hw, input, &desc, add); - status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); - if (status) { - err = ice_status_to_errno(status); + err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); + if (err) goto err_free_all; - } + err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); if (err) goto err_free_all; @@ -1522,12 +1514,11 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, if (has_frag) { /* does not return error */ ice_fdir_get_prgm_desc(hw, input, &desc, add); - status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true, - is_tun); - if (status) { - err = ice_status_to_errno(status); + err = 
ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true, + is_tun); + if (err) goto err_frag; - } + err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt); if (err) goto err_frag; @@ -1623,6 +1614,28 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf) return err; } +/** + * ice_fdir_del_all_fltrs - Delete all flow director filters + * @vsi: the VSI being changed + * + * This function needs to be called while holding hw->fdir_fltr_lock + */ +void ice_fdir_del_all_fltrs(struct ice_vsi *vsi) +{ + struct ice_fdir_fltr *f_rule, *tmp; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + + list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) { + if (!f_rule->acl_fltr) + ice_fdir_write_all_fltr(pf, f_rule, false); + ice_fdir_update_cntrs(hw, f_rule->flow_type, f_rule->acl_fltr, + false); + list_del(&f_rule->fltr_node); + devm_kfree(ice_pf_to_dev(pf), f_rule); + } +} + /** * ice_vsi_manage_fdir - turn on/off flow director * @vsi: the VSI being changed @@ -1630,7 +1643,6 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf) */ void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena) { - struct ice_fdir_fltr *f_rule, *tmp; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; enum ice_fltr_ptype flow; @@ -1644,14 +1656,8 @@ void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena) mutex_lock(&hw->fdir_fltr_lock); if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags)) goto release_lock; - list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) { - if (!f_rule->acl_fltr) - ice_fdir_write_all_fltr(pf, f_rule, false); - ice_fdir_update_cntrs(hw, f_rule->flow_type, f_rule->acl_fltr, - false); - list_del(&f_rule->fltr_node); - devm_kfree(ice_pf_to_dev(pf), f_rule); - } + + ice_fdir_del_all_fltrs(vsi); if (hw->fdir_prof) for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; @@ -1682,7 +1688,7 @@ ice_del_acl_ethtool(struct ice_hw *hw, struct ice_fdir_fltr *fltr) u64 entry; entry = ice_flow_find_entry(hw, ICE_BLK_ACL, fltr->fltr_id); - return ice_status_to_errno(ice_flow_rem_entry(hw, ICE_BLK_ACL, entry)); + return ice_flow_rem_entry(hw, ICE_BLK_ACL, entry); } /** @@ -1842,7 +1848,7 @@ ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring) * specified */ if ((*ring < ch->base_q) || - (*ring > (ch->base_q + ch->num_rxq))) + (*ring >= (ch->base_q + ch->num_rxq))) continue; /* update the dest_vsi based on channel */ @@ -1937,6 +1943,9 @@ ice_ntuple_set_input_set(struct ice_vsi *vsi, enum ice_block blk, else return -EINVAL; + /* zero input so filter comparisons are safer */ + memset(input, 0, sizeof(struct ice_fdir_fltr)); + pf = vsi->back; hw = &pf->hw; @@ -2095,7 +2104,6 @@ int ice_add_ntuple_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) return -EOPNOTSUPP; - /* Do not program filters during reset */ if (ice_is_reset_in_progress(pf->state)) { dev_err(dev, "Device is resetting - adding ntuple filters not supported during reset\n"); diff --git a/drivers/thirdparty/ice/ice_fdir.c b/drivers/thirdparty/ice/ice_fdir.c index d63f5d855903..ff69d5081e44 100644 --- a/drivers/thirdparty/ice/ice_fdir.c +++ b/drivers/thirdparty/ice/ice_fdir.c @@ -5,6 +5,7 @@ #include "ice_fdir.h" /* These are training packet headers used to program flow director filters. 
*/ + static const u8 ice_fdir_tcpv4_pkt[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, @@ -96,150 +97,6 @@ static const u8 ice_fdir_tcp4_gtpu4_pkt[] = { 0x00, 0x00, 0x00, 0x00, }; -static const u8 ice_fdir_ipv4_gtpu4_eh_pkt[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, - 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, - 0x7c, 0xa8, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, - 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x2e, - 0xba, 0x1d, 0x34, 0xff, 0x00, 0x1e, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, - 0x00, 0x00, 0x45, 0x00, 0x00, 0x16, 0x00, 0x01, - 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe5, 0x7f, 0x00, - 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, -}; - -static const u8 ice_fdir_udp4_gtpu4_eh_pkt[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, - 0x00, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, - 0x7c, 0xa0, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, - 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x36, - 0xb8, 0x23, 0x34, 0xff, 0x00, 0x26, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, - 0x00, 0x00, 0x45, 0x00, 0x00, 0x1e, 0x00, 0x01, - 0x00, 0x00, 0x40, 0x11, 0x7c, 0xcc, 0x7f, 0x00, - 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x0a, 0x01, 0xd8, 0x00, 0x00, -}; - -static const u8 ice_fdir_tcp4_gtpu4_eh_pkt[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, - 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, - 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, - 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, - 0xb8, 0x00, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, - 0x00, 0x00, 0x45, 0x00, 0x00, 0x2a, 0x00, 0x01, - 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcb, 0x7f, 0x00, - 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x91, 0xde, - 0x00, 0x00, 0x00, 0x00, -}; - -static const u8 ice_fdir_ipv4_gtpu4_eh_dw_pkt[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, - 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, - 0x7c, 0xa8, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, - 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x2e, - 0xba, 0x1d, 0x34, 0xff, 0x00, 0x1e, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, - 0x00, 0x00, 0x45, 0x00, 0x00, 0x16, 0x00, 0x01, - 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe5, 0x7f, 0x00, - 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, -}; - -static const u8 ice_fdir_udp4_gtpu4_eh_dw_pkt[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, - 0x00, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, - 0x7c, 0xa0, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, - 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x36, - 0xb8, 0x23, 0x34, 0xff, 0x00, 0x26, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, - 0x00, 0x00, 0x45, 0x00, 0x00, 0x1e, 0x00, 0x01, - 0x00, 0x00, 0x40, 0x11, 0x7c, 0xcc, 0x7f, 0x00, - 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x0a, 0x01, 0xd8, 0x00, 0x00, -}; - -static const u8 ice_fdir_tcp4_gtpu4_eh_dw_pkt[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, - 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, - 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, - 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, - 
0xb8, 0x00, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, - 0x00, 0x00, 0x45, 0x00, 0x00, 0x2a, 0x00, 0x01, - 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcb, 0x7f, 0x00, - 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x91, 0xde, - 0x00, 0x00, 0x00, 0x00, -}; - -static const u8 ice_fdir_ipv4_gtpu4_eh_up_pkt[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, - 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, - 0x7c, 0xa8, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, - 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x2e, - 0xba, 0x0d, 0x34, 0xff, 0x00, 0x1e, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, - 0x00, 0x00, 0x45, 0x00, 0x00, 0x16, 0x00, 0x01, - 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe5, 0x7f, 0x00, - 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, -}; - -static const u8 ice_fdir_udp4_gtpu4_eh_up_pkt[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, - 0x00, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, - 0x7c, 0xa0, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, - 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x36, - 0xb8, 0x13, 0x34, 0xff, 0x00, 0x26, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, - 0x00, 0x00, 0x45, 0x00, 0x00, 0x1e, 0x00, 0x01, - 0x00, 0x00, 0x40, 0x11, 0x7c, 0xcc, 0x7f, 0x00, - 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x0a, 0x01, 0xd8, 0x00, 0x00, -}; - -static const u8 ice_fdir_tcp4_gtpu4_eh_up_pkt[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, - 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, - 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, - 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, - 0xb7, 0xf0, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, - 0x00, 0x00, 0x45, 0x00, 0x00, 0x2a, 0x00, 0x01, - 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcb, 0x7f, 0x00, - 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x91, 0xde, - 0x00, 0x00, 0x00, 0x00, -}; - -static const u8 ice_fdir_icmp4_gtpu4_pkt[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, - 0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, - 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, - 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, -}; - static const u8 ice_fdir_ipv6_gtpu4_pkt[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, @@ -288,6 +145,303 @@ static const u8 ice_fdir_tcp6_gtpu4_pkt[] = { 0x20, 0x00, 0x8f, 0x7b, 0x00, 0x00, 0x00, 0x00, }; +static const u8 ice_fdir_ipv4_gtpu4_eh_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa8, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x2e, + 0xba, 0x1d, 0x34, 0xff, 0x00, 0x1e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x16, 
0x00, 0x01, + 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe5, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gtpu4_eh_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa0, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x36, + 0xb8, 0x23, 0x34, 0xff, 0x00, 0x26, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x1e, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xcc, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0a, 0x01, 0xd8, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_eh_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0xb8, 0x00, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x2a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcb, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x91, 0xde, + 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_gtpu4_eh_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0x1e, 0x9d, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x3b, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_gtpu4_eh_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x8c, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x4a, + 0x48, 0x9a, 0x34, 0xff, 0x00, 0x3a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0xff, 0xd8, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gtpu4_eh_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x80, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x56, + 0x53, 0x6b, 0x34, 0xff, 0x00, 0x46, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x16, + 0x06, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x50, 0x02, + 0x20, 0x00, 0x8f, 0xdf, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_gtpu4_eh_dw_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa8, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x2e, + 0xba, 0x1d, 0x34, 0xff, 0x00, 0x1e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x16, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe5, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gtpu4_eh_dw_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa0, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x36, + 0xb8, 0x23, 0x34, 0xff, 0x00, 0x26, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x1e, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xcc, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0a, 0x01, 0xd8, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_eh_dw_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0xb8, 0x00, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x2a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcb, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x91, 0xde, + 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_gtpu4_eh_dw_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0x1e, 0x9d, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x3b, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_gtpu4_eh_dw_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x8c, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x4a, + 0x48, 0x9a, 0x34, 0xff, 0x00, 0x3a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0xff, 0xd8, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gtpu4_eh_dw_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 
0x7c, 0x80, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x56, + 0x53, 0x6b, 0x34, 0xff, 0x00, 0x46, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x16, + 0x06, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x8f, 0xdf, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_gtpu4_eh_up_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa8, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x2e, + 0xba, 0x0d, 0x34, 0xff, 0x00, 0x1e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x16, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe5, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gtpu4_eh_up_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa0, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x36, + 0xb8, 0x13, 0x34, 0xff, 0x00, 0x26, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x1e, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xcc, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0a, 0x01, 0xd8, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_eh_up_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0xb7, 0xf0, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x2a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcb, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x91, 0xde, + 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_gtpu4_eh_up_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0x1e, 0x8d, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x3b, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_gtpu4_eh_up_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x8c, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x4a, + 0x48, 0x8a, 0x34, 0xff, 0x00, 0x3a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, + 0x00, 
0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0xff, 0xd8, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gtpu4_eh_up_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x80, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x56, + 0x53, 0x5b, 0x34, 0xff, 0x00, 0x46, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x16, + 0x06, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x8f, 0xdf, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_icmp4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + static const u8 ice_fdir_ipv6_gtpu6_pkt[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, @@ -536,6 +690,1426 @@ static const u8 ice_fdir_ipv4_frag_pkt[] = { 0x00, 0x00 }; +/* IPV4 GRE INNER IPV4 */ +static const u8 ice_fdir_ipv4_gre4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x2e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x9e, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x16, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, + 0x7c, 0xe5, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gre4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x36, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x96, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x1e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xcc, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x01, 0xd8, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gre4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x8a, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x2a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, + 0x7c, 0xcb, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x91, 0xde, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 GRE INNER IPV6 */ +static const u8 ice_fdir_ipv6_gre4_pkt[] = { + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x8a, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x02, 0x3b, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_gre4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x82, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x0a, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0a, 0xff, 0xd8, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gre4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x76, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x16, 0x06, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x8f, 0xdf, + 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV6 GRE INNER IPV4 */ +static const u8 ice_fdir_ipv4_gre6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x18, 0x2F, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x14, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x00, 0x7A, 0xEA, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gre6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x20, 0x2F, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x1C, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7A, 0xD1, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x08, 0xFF, 0xDE, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gre6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x2C, 0x2F, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x28, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x06, 0x7A, 0xD0, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x50, 0x02, 0x20, 0x00, 0x8F, 0xE3, + 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV6 GRE INNER IPV6 */ +static const u8 ice_fdir_ipv6_gre6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x2C, 0x2F, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x86, 0xDD, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x3B, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_gre6_pkt[] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x34, 0x2F, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x86, 0xDD, 0x60, 0x00, 0x00, 0x00, 0x00, 0x08, + 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x35, 0x00, 0x35, 0x00, 0x08, + 0xFF, 0x72, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gre6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x40, 0x2F, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x86, 0xDD, 0x60, 0x00, 0x00, 0x00, 0x00, 0x14, + 0x06, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x8F, 0xE1, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 GRE IPV4 GTPU IPV4 */ +static const u8 ice_fdir_ipv4_gtpu4_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x52, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x7a, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x3a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xb0, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x26, + 0xbf, 0xba, 0x30, 0xff, 0x00, 0x16, 0x00, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x16, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe5, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gtpu4_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x72, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa8, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x2e, + 0xbd, 0xc0, 0x30, 0xff, 0x00, 0x1e, 0x00, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x1e, 0x00, 0x01, + 
0x00, 0x00, 0x40, 0x11, 0x7c, 0xcc, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x35, + 0x00, 0x35, 0x00, 0x0a, 0x01, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x66, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x66, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x9c, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x3a, + 0xbd, 0x9d, 0x30, 0xff, 0x00, 0x2a, 0x00, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x2a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcb, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x14, + 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x91, 0x7a, + 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 GRE IPV4 GTPU IPV6 */ +static const u8 ice_fdir_ipv6_gtpu4_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x66, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x66, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x9c, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x3a, + 0x24, 0x3a, 0x30, 0xff, 0x00, 0x2a, 0x00, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x3b, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_gtpu4_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x5e, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0x4e, 0x37, 0x30, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x35, 0x00, 0x35, 0x00, 0x0a, + 0xff, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gtpu4_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x7a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x52, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x62, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x88, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x4e, + 0x59, 0x08, 0x30, 0xff, 0x00, 0x3e, 0x00, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x16, + 0x06, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x8f, 0x7b, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV6 GRE IPV4 GTPU IPV4 */ +static const u8 
ice_fdir_ipv4_gtpu4_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x3e, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x3a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xb0, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x26, 0xbf, 0xba, 0x30, 0xff, + 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x16, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, + 0x7c, 0xe5, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gtpu4_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x46, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x42, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xa8, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x2e, 0xbd, 0xc0, 0x30, 0xff, + 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x1e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xcc, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x35, 0x00, 0x35, 0x00, 0x0a, + 0x01, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x52, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x4e, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x9c, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x3a, 0xbd, 0x9d, 0x30, 0xff, + 0x00, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x2a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, + 0x7c, 0xcb, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x91, 0x7a, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV6 GRE IPV4 GTPU IPV6 */ +static const u8 ice_fdir_ipv6_gtpu4_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x52, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x4e, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x9c, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x3a, 0x24, 0x3a, 0x30, 0xff, + 0x00, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x02, 0x3b, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_gtpu4_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x5a, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x56, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x94, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x42, 0x4e, 0x37, 0x30, 0xff, + 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x0a, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x35, + 0x00, 0x35, 0x00, 0x0a, 0xff, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gtpu4_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x66, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x62, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x88, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x4e, 0x59, 0x08, 0x30, 0xff, + 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x16, 0x06, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x14, + 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x8f, 0x7b, + 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 GRE IPV4 GTPU EH IPV4 */ +static const u8 ice_fdir_ipv4_gtpu4_eh_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x72, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa8, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x2e, + 0xba, 0x1d, 0x34, 0xff, 0x00, 0x1e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x16, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe5, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gtpu4_eh_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x62, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x6a, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa0, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x36, + 0xb8, 0x23, 0x34, 0xff, 0x00, 0x26, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x1e, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xcc, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x35, + 0x00, 0x35, 0x00, 0x0a, 0x01, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_eh_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 
0x00, 0x6e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x5e, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0xb8, 0x00, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x2a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcb, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x14, + 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x91, 0x7a, + 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 GRE IPV4 GTPU EH IPV6 */ +static const u8 ice_fdir_ipv6_gtpu4_eh_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x5e, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0x1e, 0x9d, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x3b, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_gtpu4_eh_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x76, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x56, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x8c, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x4a, + 0x48, 0x9a, 0x34, 0xff, 0x00, 0x3a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x35, 0x00, 0x35, 0x00, 0x0a, + 0xff, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gtpu4_eh_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x82, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x4a, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x80, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x56, + 0x53, 0x6b, 0x34, 0xff, 0x00, 0x46, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x16, + 0x06, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x8f, 0x7b, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV6 GRE IPV4 GTPU EH IPV4 */ +static const u8 ice_fdir_ipv4_gtpu4_eh_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x46, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x42, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xa8, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x2e, 0xba, 0x1d, 0x34, 0xff, + 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x16, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, + 0x7c, 0xe5, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gtpu4_eh_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x4e, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x4a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xa0, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x36, 0xb8, 0x23, 0x34, 0xff, + 0x00, 0x26, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x1e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xcc, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x35, 0x00, 0x35, 0x00, 0x0a, + 0x01, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_eh_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x5a, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x56, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x94, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x42, 0xb8, 0x00, 0x34, 0xff, + 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x2a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, + 0x7c, 0xcb, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x91, 0x7a, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV6 GRE IPV4 GTPU EH IPV6 */ +static const u8 ice_fdir_ipv6_gtpu4_eh_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x5a, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x56, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x94, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x42, 0x1e, 0x9d, 0x34, 0xff, + 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x02, 0x3b, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 
+}; + +static const u8 ice_fdir_udp6_gtpu4_eh_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x62, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x5e, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x8c, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x4a, 0x48, 0x9a, 0x34, 0xff, + 0x00, 0x3a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x0a, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x35, + 0x00, 0x35, 0x00, 0x0a, 0xff, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gtpu4_eh_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x6e, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x6a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x80, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x56, 0x53, 0x6b, 0x34, 0xff, + 0x00, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x16, 0x06, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x14, + 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x8f, 0x7b, + 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 GRE IPV4 GTPU DW IPV4 */ +static const u8 ice_fdir_ipv4_gtpu4_eh_dw_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x72, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa8, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x2e, + 0xba, 0x1d, 0x34, 0xff, 0x00, 0x1e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x16, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe5, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gtpu4_eh_dw_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x62, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x6a, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa0, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x36, + 0xb8, 0x23, 0x34, 0xff, 0x00, 0x26, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x1e, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xcc, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 
0x35, + 0x00, 0x35, 0x00, 0x0a, 0x01, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_eh_dw_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x5e, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0xb8, 0x00, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x2a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcb, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x14, + 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x91, 0x7a, + 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 GRE IPV4 GTPU DW IPV6 */ +static const u8 ice_fdir_ipv6_gtpu4_eh_dw_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x5e, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0x1e, 0x9d, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x3b, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_gtpu4_eh_dw_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x76, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x56, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x8c, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x4a, + 0x48, 0x9a, 0x34, 0xff, 0x00, 0x3a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x35, 0x00, 0x35, 0x00, 0x0a, + 0xff, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gtpu4_eh_dw_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x82, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x4a, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x80, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x56, + 0x53, 0x6b, 0x34, 0xff, 0x00, 0x46, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x16, + 0x06, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x8f, 0x7b, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV6 GRE IPV4 GTPU DW IPV4 */ +static const u8 ice_fdir_ipv4_gtpu4_eh_dw_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x46, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x42, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xa8, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x2e, 0xba, 0x1d, 0x34, 0xff, + 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x16, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, + 0x7c, 0xe5, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gtpu4_eh_dw_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x4e, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x4a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xa0, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x36, 0xb8, 0x23, 0x34, 0xff, + 0x00, 0x26, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x1e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xcc, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x35, 0x00, 0x35, 0x00, 0x0a, + 0x01, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_eh_dw_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x5a, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x56, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x94, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x42, 0xb8, 0x00, 0x34, 0xff, + 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x2a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, + 0x7c, 0xcb, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x91, 0x7a, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV6 GRE IPV4 GTPU DW IPV6 */ +static const u8 ice_fdir_ipv6_gtpu4_eh_dw_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x5a, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x56, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x94, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x42, 0x1e, 0x9d, 0x34, 0xff, + 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, + 
0x00, 0x00, 0x00, 0x02, 0x3b, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_gtpu4_eh_dw_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x62, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x5e, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x8c, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x4a, 0x48, 0x9a, 0x34, 0xff, + 0x00, 0x3a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x0a, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x35, + 0x00, 0x35, 0x00, 0x0a, 0xff, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gtpu4_eh_dw_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x6e, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x6a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x80, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x56, 0x53, 0x6b, 0x34, 0xff, + 0x00, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x16, 0x06, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x14, + 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x8f, 0x7b, + 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 GRE IPV4 GTPU UP IPV4 */ +static const u8 ice_fdir_ipv4_gtpu4_eh_up_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x72, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa8, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x2e, + 0xba, 0x0d, 0x34, 0xff, 0x00, 0x1e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x16, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe5, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gtpu4_eh_up_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x62, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x6a, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa0, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 
0x00, 0x36, + 0xb8, 0x13, 0x34, 0xff, 0x00, 0x26, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x1e, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xcc, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x35, + 0x00, 0x35, 0x00, 0x0a, 0x01, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_eh_up_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x5e, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0xb7, 0xf0, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x2a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcb, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x14, + 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x91, 0x7a, + 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 GRE IPV4 GTPU UP IPV6 */ +static const u8 ice_fdir_ipv6_gtpu4_eh_up_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x5e, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x42, + 0x1e, 0x8d, 0x34, 0xff, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x3b, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_gtpu4_eh_up_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x76, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x56, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x8c, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x4a, + 0x48, 0x8a, 0x34, 0xff, 0x00, 0x3a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x35, 0x00, 0x35, 0x00, 0x0a, + 0xff, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gtpu4_eh_up_gre4_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x82, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x4a, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x80, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x08, 0x68, 0x08, 0x68, 0x00, 0x56, + 0x53, 0x5b, 0x34, 0xff, 0x00, 0x46, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x01, 0x10, + 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x16, + 0x06, 0x40, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x8f, 0x7b, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV6 GRE IPV4 GTPU UP IPV4 */ +static const u8 ice_fdir_ipv4_gtpu4_eh_up_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x46, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x42, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xa8, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x2e, 0xba, 0x0d, 0x34, 0xff, + 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x10, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x16, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, + 0x7c, 0xe5, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_gtpu4_eh_up_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x4e, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x4a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xa0, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x36, 0xb8, 0x13, 0x34, 0xff, + 0x00, 0x26, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x10, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x1e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xcc, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x35, 0x00, 0x35, 0x00, 0x0a, + 0x01, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_eh_up_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x5a, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x56, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x94, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x42, 0xb7, 0xf0, 0x34, 0xff, + 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x10, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x2a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, + 0x7c, 0xcb, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x91, 0x7a, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV6 GRE IPV4 GTPU UP IPV6 */ +static const u8 ice_fdir_ipv6_gtpu4_eh_up_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x5a, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x56, 0x00, 
0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x94, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x42, 0x1e, 0x8d, 0x34, 0xff, + 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x10, 0x00, 0x00, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x02, 0x3b, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_gtpu4_eh_up_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x62, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x5e, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x8c, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x4a, 0x48, 0x8a, 0x34, 0xff, + 0x00, 0x3a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x10, 0x00, 0x00, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x0a, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x35, + 0x00, 0x35, 0x00, 0x0a, 0xff, 0x6e, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_gtpu4_eh_up_gre6_pkt[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x6e, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x08, 0x00, 0x45, 0x00, 0x00, 0x6a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0x80, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x08, 0x68, + 0x08, 0x68, 0x00, 0x56, 0x53, 0x5b, 0x34, 0xff, + 0x00, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x85, 0x01, 0x10, 0x00, 0x00, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x16, 0x06, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x14, + 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x8f, 0x7b, + 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 L2TPV2 control */ +static const u8 ice_fdir_ipv4_l2tpv2_ctrl_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x28, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xc2, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x14, + 0x2c, 0x6b, 0xc8, 0x02, 0x00, 0x0c, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 L2TPV2 */ +static const u8 ice_fdir_ipv4_l2tpv2_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x28, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xc2, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x14, + 0x2c, 0x6b, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +/* IPV4 PPPOL2TPV2 */ +static const u8 ice_fdir_ipv4_l2tpv2_ppp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x26, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xc4, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x12, + 0xf5, 0x77, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, + 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 PPPOL2TPV2 IPV4 */ +static const u8 ice_fdir_ipv4_l2tpv2_ppp4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x3a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xb0, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x26, + 0xf5, 0x2e, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, + 0xff, 0x03, 0x00, 0x21, 0x45, 0x00, 0x00, 0x14, + 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe7, + 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, + 0x00, 0x00, +}; + +/* IPV4 PPPOL2TPV2 IPV4 UDP */ +static const u8 ice_fdir_udp4_l2tpv2_ppp4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xa8, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x2e, + 0xf3, 0x3a, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, + 0xff, 0x03, 0x00, 0x21, 0x45, 0x00, 0x00, 0x1c, + 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, 0x7c, 0xce, + 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, + 0x00, 0x35, 0x00, 0x35, 0x00, 0x08, 0x01, 0x72, + 0x00, 0x00, +}; + +/* IPV4 PPPOL2TPV2 IPV4 TCP */ +static const u8 ice_fdir_tcp4_l2tpv2_ppp4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x9c, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x3a, + 0xf3, 0x23, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, + 0xff, 0x03, 0x00, 0x21, 0x45, 0x00, 0x00, 0x28, + 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcd, + 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, + 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, + 0x91, 0x7c, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV4 PPPOL2TPV2 IPV6 */ +static const u8 ice_fdir_ipv6_l2tpv2_ppp4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x9c, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x3a, + 0x59, 0x8e, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, + 0xff, 0x03, 0x00, 0x57, 0x60, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x3b, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +/* IPV4 PPPOL2TPV2 IPV6 UDP */ +static const u8 ice_fdir_udp6_l2tpv2_ppp4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x42, + 0x83, 0x91, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, + 0xff, 0x03, 0x00, 0x57, 0x60, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x35, 0x00, 0x35, + 0x00, 0x08, 0xff, 0x72, 0x00, 0x00, +}; + +/* IPV4 
PPPOL2TPV2 IPV6 TCP */ +static const u8 ice_fdir_tcp6_l2tpv2_ppp4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x62, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0x88, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x4e, + 0x8e, 0x6e, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, + 0xff, 0x03, 0x00, 0x57, 0x60, 0x00, 0x00, 0x00, + 0x00, 0x14, 0x06, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x14, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x50, 0x02, 0x20, 0x00, 0x8f, 0x7d, 0x00, 0x00, + 0x00, 0x00, +}; + +/* IPV6 L2TPV2 control */ +static const u8 ice_fdir_ipv6_l2tpv2_ctrl_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5, + 0x06, 0xa5, 0x00, 0x14, 0x2a, 0x6c, 0xc8, 0x02, + 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV6 L2TPV2 */ +static const u8 ice_fdir_ipv6_l2tpv2_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5, + 0x06, 0xa5, 0x00, 0x14, 0x2a, 0x6c, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +/* IPV6 PPPOL2TPV2 */ +static const u8 ice_fdir_ipv6_l2tpv2_ppp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x12, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5, + 0x06, 0xa5, 0x00, 0x12, 0xf3, 0x78, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x00, + 0x00, 0x00, +}; + +/* IPV6 PPPOL2TPV2 IPV4 */ +static const u8 ice_fdir_ipv4_l2tpv2_ppp6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x26, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5, + 0x06, 0xa5, 0x00, 0x26, 0xf3, 0x2f, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x21, + 0x45, 0x00, 0x00, 0x14, 0x00, 0x01, 0x00, 0x00, + 0x40, 0x00, 0x7c, 0xe7, 0x7f, 0x00, 0x00, 0x01, + 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00, +}; + +/* IPV6 PPPOL2TPV2 IPV4 UDP */ +static const u8 ice_fdir_udp4_l2tpv2_ppp6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x2e, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5, + 0x06, 0xa5, 0x00, 0x2e, 
0xf1, 0x3b, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x21, + 0x45, 0x00, 0x00, 0x1c, 0x00, 0x01, 0x00, 0x00, + 0x40, 0x11, 0x7c, 0xce, 0x7f, 0x00, 0x00, 0x01, + 0x7f, 0x00, 0x00, 0x01, 0x00, 0x35, 0x00, 0x35, + 0x00, 0x08, 0x01, 0x72, 0x00, 0x00, +}; + +/* IPV6 PPPOL2TPV2 IPV4 TCP */ +static const u8 ice_fdir_tcp4_l2tpv2_ppp6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x3a, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5, + 0x06, 0xa5, 0x00, 0x3a, 0xf1, 0x24, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x21, + 0x45, 0x00, 0x00, 0x28, 0x00, 0x01, 0x00, 0x00, + 0x40, 0x06, 0x7c, 0xcd, 0x7f, 0x00, 0x00, 0x01, + 0x7f, 0x00, 0x00, 0x01, 0x00, 0x14, 0x00, 0x50, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x50, 0x02, 0x20, 0x00, 0x91, 0x7c, 0x00, 0x00, + 0x00, 0x00, +}; + +/* IPV6 PPPOL2TPV2 IPV6 */ +static const u8 ice_fdir_ipv6_l2tpv2_ppp6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x3a, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5, + 0x06, 0xa5, 0x00, 0x3a, 0x57, 0x8f, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x57, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, +}; + +/* IPV6 PPPOL2TPV2 IPV6 UDP */ +static const u8 ice_fdir_udp6_l2tpv2_ppp6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x42, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5, + 0x06, 0xa5, 0x00, 0x42, 0x81, 0x92, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x57, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x35, 0x00, 0x35, 0x00, 0x08, 0xff, 0x72, + 0x00, 0x00, +}; + +/* IPV6 PPPOL2TPV2 IPV6 TCP */ +static const u8 ice_fdir_tcp6_l2tpv2_ppp6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x4e, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5, + 0x06, 0xa5, 0x00, 0x4e, 0x8c, 0x6f, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x57, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x14, 0x06, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, + 0x8f, 0x7d, 0x00, 0x00, 0x00, 0x00, +}; + static const u8 ice_fdir_tcpv6_pkt[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, @@ -772,6 +2346,70 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { sizeof(ice_fdir_ipv4_gtpu4_eh_up_pkt), ice_fdir_ipv4_gtpu4_eh_up_pkt, }, + /* IPV4 GRE IPV4 GTPU */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU, + sizeof(ice_fdir_ipv4_gtpu4_gre4_pkt), + ice_fdir_ipv4_gtpu4_gre4_pkt, + sizeof(ice_fdir_ipv4_gtpu4_gre4_pkt), + ice_fdir_ipv4_gtpu4_gre4_pkt, + }, + /* IPV4 GRE IPV4 GTPU EH */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH, + sizeof(ice_fdir_ipv4_gtpu4_eh_gre4_pkt), + ice_fdir_ipv4_gtpu4_eh_gre4_pkt, + sizeof(ice_fdir_ipv4_gtpu4_eh_gre4_pkt), + ice_fdir_ipv4_gtpu4_eh_gre4_pkt, + }, + /* IPV4 GRE IPV4 GTPU DW */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW, + sizeof(ice_fdir_ipv4_gtpu4_eh_dw_gre4_pkt), + ice_fdir_ipv4_gtpu4_eh_dw_gre4_pkt, + sizeof(ice_fdir_ipv4_gtpu4_eh_dw_gre4_pkt), + ice_fdir_ipv4_gtpu4_eh_dw_gre4_pkt, + }, + /* IPV4 GRE IPV4 GTPU UP */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP, + sizeof(ice_fdir_ipv4_gtpu4_eh_up_gre4_pkt), + ice_fdir_ipv4_gtpu4_eh_up_gre4_pkt, + sizeof(ice_fdir_ipv4_gtpu4_eh_up_gre4_pkt), + ice_fdir_ipv4_gtpu4_eh_up_gre4_pkt, + }, + /* IPV6 GRE IPV4 GTPU */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU, + sizeof(ice_fdir_ipv4_gtpu4_gre6_pkt), + ice_fdir_ipv4_gtpu4_gre6_pkt, + sizeof(ice_fdir_ipv4_gtpu4_gre6_pkt), + ice_fdir_ipv4_gtpu4_gre6_pkt, + }, + /* IPV6 GRE IPV4 GTPU EH */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH, + sizeof(ice_fdir_ipv4_gtpu4_eh_gre6_pkt), + ice_fdir_ipv4_gtpu4_eh_gre6_pkt, + sizeof(ice_fdir_ipv4_gtpu4_eh_gre6_pkt), + ice_fdir_ipv4_gtpu4_eh_gre6_pkt, + }, + /* IPV6 GRE IPV4 GTPU DW */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW, + sizeof(ice_fdir_ipv4_gtpu4_eh_dw_gre6_pkt), + ice_fdir_ipv4_gtpu4_eh_dw_gre6_pkt, + sizeof(ice_fdir_ipv4_gtpu4_eh_dw_gre6_pkt), + ice_fdir_ipv4_gtpu4_eh_dw_gre6_pkt, + }, + /* IPV6 GRE IPV4 GTPU UP */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP, + sizeof(ice_fdir_ipv4_gtpu4_eh_up_gre6_pkt), + ice_fdir_ipv4_gtpu4_eh_up_gre6_pkt, + sizeof(ice_fdir_ipv4_gtpu4_eh_up_gre6_pkt), + ice_fdir_ipv4_gtpu4_eh_up_gre6_pkt, + }, { ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4, sizeof(ice_fdir_ipv4_gtpu4_pkt), @@ -793,6 +2431,27 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { sizeof(ice_fdir_tcp4_gtpu4_pkt), ice_fdir_tcp4_gtpu4_pkt, }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6, + sizeof(ice_fdir_ipv6_gtpu4_pkt), + ice_fdir_ipv6_gtpu4_pkt, + sizeof(ice_fdir_ipv6_gtpu4_pkt), + ice_fdir_ipv6_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_UDP, + sizeof(ice_fdir_udp6_gtpu4_pkt), + ice_fdir_udp6_gtpu4_pkt, + sizeof(ice_fdir_udp6_gtpu4_pkt), + ice_fdir_udp6_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_TCP, + sizeof(ice_fdir_tcp6_gtpu4_pkt), + ice_fdir_tcp6_gtpu4_pkt, + sizeof(ice_fdir_tcp6_gtpu4_pkt), + ice_fdir_tcp6_gtpu4_pkt, + }, { ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4, sizeof(ice_fdir_ipv4_gtpu4_eh_pkt), @@ -814,6 +2473,27 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { sizeof(ice_fdir_tcp4_gtpu4_eh_pkt), ice_fdir_tcp4_gtpu4_eh_pkt, }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6, + sizeof(ice_fdir_ipv6_gtpu4_eh_pkt), + ice_fdir_ipv6_gtpu4_eh_pkt, + sizeof(ice_fdir_ipv6_gtpu4_eh_pkt), + ice_fdir_ipv6_gtpu4_eh_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_UDP, + 
sizeof(ice_fdir_udp6_gtpu4_eh_pkt), + ice_fdir_udp6_gtpu4_eh_pkt, + sizeof(ice_fdir_udp6_gtpu4_eh_pkt), + ice_fdir_udp6_gtpu4_eh_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_TCP, + sizeof(ice_fdir_tcp6_gtpu4_eh_pkt), + ice_fdir_tcp6_gtpu4_eh_pkt, + sizeof(ice_fdir_tcp6_gtpu4_eh_pkt), + ice_fdir_tcp6_gtpu4_eh_pkt, + }, { ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4, sizeof(ice_fdir_ipv4_gtpu4_eh_dw_pkt), @@ -835,6 +2515,27 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { sizeof(ice_fdir_tcp4_gtpu4_eh_dw_pkt), ice_fdir_tcp4_gtpu4_eh_dw_pkt, }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6, + sizeof(ice_fdir_ipv6_gtpu4_eh_dw_pkt), + ice_fdir_ipv6_gtpu4_eh_dw_pkt, + sizeof(ice_fdir_ipv6_gtpu4_eh_dw_pkt), + ice_fdir_ipv6_gtpu4_eh_dw_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_UDP, + sizeof(ice_fdir_udp6_gtpu4_eh_dw_pkt), + ice_fdir_udp6_gtpu4_eh_dw_pkt, + sizeof(ice_fdir_udp6_gtpu4_eh_dw_pkt), + ice_fdir_udp6_gtpu4_eh_dw_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_TCP, + sizeof(ice_fdir_tcp6_gtpu4_eh_dw_pkt), + ice_fdir_tcp6_gtpu4_eh_dw_pkt, + sizeof(ice_fdir_tcp6_gtpu4_eh_dw_pkt), + ice_fdir_tcp6_gtpu4_eh_dw_pkt, + }, { ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4, sizeof(ice_fdir_ipv4_gtpu4_eh_up_pkt), @@ -856,6 +2557,27 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { sizeof(ice_fdir_tcp4_gtpu4_eh_up_pkt), ice_fdir_tcp4_gtpu4_eh_up_pkt, }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6, + sizeof(ice_fdir_ipv6_gtpu4_eh_up_pkt), + ice_fdir_ipv6_gtpu4_eh_up_pkt, + sizeof(ice_fdir_ipv6_gtpu4_eh_up_pkt), + ice_fdir_ipv6_gtpu4_eh_up_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_UDP, + sizeof(ice_fdir_udp6_gtpu4_eh_up_pkt), + ice_fdir_udp6_gtpu4_eh_up_pkt, + sizeof(ice_fdir_udp6_gtpu4_eh_up_pkt), + ice_fdir_udp6_gtpu4_eh_up_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_TCP, + sizeof(ice_fdir_tcp6_gtpu4_eh_up_pkt), + ice_fdir_tcp6_gtpu4_eh_up_pkt, + sizeof(ice_fdir_tcp6_gtpu4_eh_up_pkt), + ice_fdir_tcp6_gtpu4_eh_up_pkt, + }, { ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP, sizeof(ice_fdir_icmp4_gtpu4_pkt), @@ -870,27 +2592,6 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { sizeof(ice_fdir_ipv4_gtpu4_pkt), ice_fdir_ipv4_gtpu4_pkt, }, - { - ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6, - sizeof(ice_fdir_ipv6_gtpu4_pkt), - ice_fdir_ipv6_gtpu4_pkt, - sizeof(ice_fdir_ipv6_gtpu4_pkt), - ice_fdir_ipv6_gtpu4_pkt, - }, - { - ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_UDP, - sizeof(ice_fdir_udp6_gtpu4_pkt), - ice_fdir_udp6_gtpu4_pkt, - sizeof(ice_fdir_udp6_gtpu4_pkt), - ice_fdir_udp6_gtpu4_pkt, - }, - { - ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_TCP, - sizeof(ice_fdir_tcp6_gtpu4_pkt), - ice_fdir_tcp6_gtpu4_pkt, - sizeof(ice_fdir_tcp6_gtpu4_pkt), - ice_fdir_tcp6_gtpu4_pkt, - }, { ICE_FLTR_PTYPE_NONF_IPV6_GTPU, sizeof(ice_fdir_ipv6_gtpu6_pkt), @@ -1022,6 +2723,26 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { sizeof(ice_fdir_udp4_vxlan_pkt), ice_fdir_udp4_vxlan_pkt, sizeof(ice_fdir_udp4_vxlan_pkt), ice_fdir_udp4_vxlan_pkt, }, + { + ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP, + sizeof(ice_fdir_udp4_tun_pkt), ice_fdir_udp4_tun_pkt, + sizeof(ice_fdir_udp4_tun_pkt), ice_fdir_udp4_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP, + sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt, + sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP, + sizeof(ice_fdir_sctp4_tun_pkt), ice_fdir_sctp4_tun_pkt, + sizeof(ice_fdir_sctp4_tun_pkt), ice_fdir_sctp4_tun_pkt, + }, + { + 
ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER, + sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt, + sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt, + }, { ICE_FLTR_PTYPE_NONF_ECPRI_TP0, sizeof(ice_fdir_ecpri_tp0_pkt), ice_fdir_ecpri_tp0_pkt, @@ -1034,6 +2755,582 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { sizeof(ice_fdir_ipv4_udp_ecpri_tp0_pkt), ice_fdir_ipv4_udp_ecpri_tp0_pkt, }, + /* IPV4 GRE INNER IPV4 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4, + sizeof(ice_fdir_ipv4_gre4_pkt), + ice_fdir_ipv4_gre4_pkt, + sizeof(ice_fdir_ipv4_gre4_pkt), + ice_fdir_ipv4_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_UDP, + sizeof(ice_fdir_udp4_gre4_pkt), + ice_fdir_udp4_gre4_pkt, + sizeof(ice_fdir_udp4_gre4_pkt), + ice_fdir_udp4_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_TCP, + sizeof(ice_fdir_tcp4_gre4_pkt), + ice_fdir_tcp4_gre4_pkt, + sizeof(ice_fdir_tcp4_gre4_pkt), + ice_fdir_tcp4_gre4_pkt, + }, + /* IPV4 GRE INNER IPV6 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6, + sizeof(ice_fdir_ipv6_gre4_pkt), + ice_fdir_ipv6_gre4_pkt, + sizeof(ice_fdir_ipv6_gre4_pkt), + ice_fdir_ipv6_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_UDP, + sizeof(ice_fdir_udp6_gre4_pkt), + ice_fdir_udp6_gre4_pkt, + sizeof(ice_fdir_udp6_gre4_pkt), + ice_fdir_udp6_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_TCP, + sizeof(ice_fdir_tcp6_gre4_pkt), + ice_fdir_tcp6_gre4_pkt, + sizeof(ice_fdir_tcp6_gre4_pkt), + ice_fdir_tcp6_gre4_pkt, + }, + /* IPV6 GRE INNER IPV4 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4, + sizeof(ice_fdir_ipv4_gre6_pkt), + ice_fdir_ipv4_gre6_pkt, + sizeof(ice_fdir_ipv4_gre6_pkt), + ice_fdir_ipv4_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_UDP, + sizeof(ice_fdir_udp4_gre6_pkt), + ice_fdir_udp4_gre6_pkt, + sizeof(ice_fdir_udp4_gre6_pkt), + ice_fdir_udp4_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_TCP, + sizeof(ice_fdir_tcp4_gre6_pkt), + ice_fdir_tcp4_gre6_pkt, + sizeof(ice_fdir_tcp4_gre6_pkt), + ice_fdir_tcp4_gre6_pkt, + }, + /* IPV4 GRE INNER IPV6 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6, + sizeof(ice_fdir_ipv6_gre6_pkt), + ice_fdir_ipv6_gre6_pkt, + sizeof(ice_fdir_ipv6_gre6_pkt), + ice_fdir_ipv6_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_UDP, + sizeof(ice_fdir_udp6_gre6_pkt), + ice_fdir_udp6_gre6_pkt, + sizeof(ice_fdir_udp6_gre6_pkt), + ice_fdir_udp6_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_TCP, + sizeof(ice_fdir_tcp6_gre6_pkt), + ice_fdir_tcp6_gre6_pkt, + sizeof(ice_fdir_tcp6_gre6_pkt), + ice_fdir_tcp6_gre6_pkt, + }, + /* IPV4 GRE IPV4 GTPU IPV4 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4, + sizeof(ice_fdir_ipv4_gtpu4_gre4_pkt), + ice_fdir_ipv4_gtpu4_gre4_pkt, + sizeof(ice_fdir_ipv4_gtpu4_gre4_pkt), + ice_fdir_ipv4_gtpu4_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4_UDP, + sizeof(ice_fdir_udp4_gtpu4_gre4_pkt), + ice_fdir_udp4_gtpu4_gre4_pkt, + sizeof(ice_fdir_udp4_gtpu4_gre4_pkt), + ice_fdir_udp4_gtpu4_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4_TCP, + sizeof(ice_fdir_tcp4_gtpu4_gre4_pkt), + ice_fdir_tcp4_gtpu4_gre4_pkt, + sizeof(ice_fdir_tcp4_gtpu4_gre4_pkt), + ice_fdir_tcp4_gtpu4_gre4_pkt, + }, + /* IPV4 GRE IPV4 GTPU IPV6 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6, + sizeof(ice_fdir_ipv6_gtpu4_gre4_pkt), + ice_fdir_ipv6_gtpu4_gre4_pkt, + sizeof(ice_fdir_ipv6_gtpu4_gre4_pkt), + ice_fdir_ipv6_gtpu4_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6_UDP, + sizeof(ice_fdir_udp6_gtpu4_gre4_pkt), + ice_fdir_udp6_gtpu4_gre4_pkt, + 
sizeof(ice_fdir_udp6_gtpu4_gre4_pkt), + ice_fdir_udp6_gtpu4_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6_TCP, + sizeof(ice_fdir_tcp6_gtpu4_gre4_pkt), + ice_fdir_tcp6_gtpu4_gre4_pkt, + sizeof(ice_fdir_tcp6_gtpu4_gre4_pkt), + ice_fdir_tcp6_gtpu4_gre4_pkt, + }, + /* IPV6 GRE IPV4 GTPU IPV4 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4, + sizeof(ice_fdir_ipv4_gtpu4_gre6_pkt), + ice_fdir_ipv4_gtpu4_gre6_pkt, + sizeof(ice_fdir_ipv4_gtpu4_gre6_pkt), + ice_fdir_ipv4_gtpu4_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4_UDP, + sizeof(ice_fdir_udp4_gtpu4_gre6_pkt), + ice_fdir_udp4_gtpu4_gre6_pkt, + sizeof(ice_fdir_udp4_gtpu4_gre6_pkt), + ice_fdir_udp4_gtpu4_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4_TCP, + sizeof(ice_fdir_tcp4_gtpu4_gre6_pkt), + ice_fdir_tcp4_gtpu4_gre6_pkt, + sizeof(ice_fdir_tcp4_gtpu4_gre6_pkt), + ice_fdir_tcp4_gtpu4_gre6_pkt, + }, + /* IPV6 GRE IPV4 GTPU IPV6 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6, + sizeof(ice_fdir_ipv6_gtpu4_gre6_pkt), + ice_fdir_ipv6_gtpu4_gre6_pkt, + sizeof(ice_fdir_ipv6_gtpu4_gre6_pkt), + ice_fdir_ipv6_gtpu4_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6_UDP, + sizeof(ice_fdir_udp6_gtpu4_gre6_pkt), + ice_fdir_udp6_gtpu4_gre6_pkt, + sizeof(ice_fdir_udp6_gtpu4_gre6_pkt), + ice_fdir_udp6_gtpu4_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6_TCP, + sizeof(ice_fdir_tcp6_gtpu4_gre6_pkt), + ice_fdir_tcp6_gtpu4_gre6_pkt, + sizeof(ice_fdir_tcp6_gtpu4_gre6_pkt), + ice_fdir_tcp6_gtpu4_gre6_pkt, + }, + /* IPV4 GRE IPV4 GTPU EH IPV4 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4, + sizeof(ice_fdir_ipv4_gtpu4_eh_gre4_pkt), + ice_fdir_ipv4_gtpu4_eh_gre4_pkt, + sizeof(ice_fdir_ipv4_gtpu4_eh_gre4_pkt), + ice_fdir_ipv4_gtpu4_eh_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4_UDP, + sizeof(ice_fdir_udp4_gtpu4_eh_gre4_pkt), + ice_fdir_udp4_gtpu4_eh_gre4_pkt, + sizeof(ice_fdir_udp4_gtpu4_eh_gre4_pkt), + ice_fdir_udp4_gtpu4_eh_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4_TCP, + sizeof(ice_fdir_tcp4_gtpu4_eh_gre4_pkt), + ice_fdir_tcp4_gtpu4_eh_gre4_pkt, + sizeof(ice_fdir_tcp4_gtpu4_eh_gre4_pkt), + ice_fdir_tcp4_gtpu4_eh_gre4_pkt, + }, + /* IPV4 GRE IPV4 GTPU EH IPV6 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6, + sizeof(ice_fdir_ipv6_gtpu4_eh_gre4_pkt), + ice_fdir_ipv6_gtpu4_eh_gre4_pkt, + sizeof(ice_fdir_ipv6_gtpu4_eh_gre4_pkt), + ice_fdir_ipv6_gtpu4_eh_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6_UDP, + sizeof(ice_fdir_udp6_gtpu4_eh_gre4_pkt), + ice_fdir_udp6_gtpu4_eh_gre4_pkt, + sizeof(ice_fdir_udp6_gtpu4_eh_gre4_pkt), + ice_fdir_udp6_gtpu4_eh_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6_TCP, + sizeof(ice_fdir_tcp6_gtpu4_eh_gre4_pkt), + ice_fdir_tcp6_gtpu4_eh_gre4_pkt, + sizeof(ice_fdir_tcp6_gtpu4_eh_gre4_pkt), + ice_fdir_tcp6_gtpu4_eh_gre4_pkt, + }, + /* IPV6 GRE IPV4 GTPU EH IPV4 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4, + sizeof(ice_fdir_ipv4_gtpu4_eh_gre6_pkt), + ice_fdir_ipv4_gtpu4_eh_gre6_pkt, + sizeof(ice_fdir_ipv4_gtpu4_eh_gre6_pkt), + ice_fdir_ipv4_gtpu4_eh_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4_UDP, + sizeof(ice_fdir_udp4_gtpu4_eh_gre6_pkt), + ice_fdir_udp4_gtpu4_eh_gre6_pkt, + sizeof(ice_fdir_udp4_gtpu4_eh_gre6_pkt), + ice_fdir_udp4_gtpu4_eh_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4_TCP, + sizeof(ice_fdir_tcp4_gtpu4_eh_gre6_pkt), + ice_fdir_tcp4_gtpu4_eh_gre6_pkt, + 
sizeof(ice_fdir_tcp4_gtpu4_eh_gre6_pkt), + ice_fdir_tcp4_gtpu4_eh_gre6_pkt, + }, + /* IPV6 GRE IPV4 GTPU EH IPV6 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6, + sizeof(ice_fdir_ipv6_gtpu4_eh_gre6_pkt), + ice_fdir_ipv6_gtpu4_eh_gre6_pkt, + sizeof(ice_fdir_ipv6_gtpu4_eh_gre6_pkt), + ice_fdir_ipv6_gtpu4_eh_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6_UDP, + sizeof(ice_fdir_udp6_gtpu4_eh_gre6_pkt), + ice_fdir_udp6_gtpu4_eh_gre6_pkt, + sizeof(ice_fdir_udp6_gtpu4_eh_gre6_pkt), + ice_fdir_udp6_gtpu4_eh_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6_TCP, + sizeof(ice_fdir_tcp6_gtpu4_eh_gre6_pkt), + ice_fdir_tcp6_gtpu4_eh_gre6_pkt, + sizeof(ice_fdir_tcp6_gtpu4_eh_gre6_pkt), + ice_fdir_tcp6_gtpu4_eh_gre6_pkt, + }, + /* IPV4 GRE IPV4 GTPU DW IPV4 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4, + sizeof(ice_fdir_ipv4_gtpu4_eh_dw_gre4_pkt), + ice_fdir_ipv4_gtpu4_eh_dw_gre4_pkt, + sizeof(ice_fdir_ipv4_gtpu4_eh_dw_gre4_pkt), + ice_fdir_ipv4_gtpu4_eh_dw_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_UDP, + sizeof(ice_fdir_udp4_gtpu4_eh_dw_gre4_pkt), + ice_fdir_udp4_gtpu4_eh_dw_gre4_pkt, + sizeof(ice_fdir_udp4_gtpu4_eh_dw_gre4_pkt), + ice_fdir_udp4_gtpu4_eh_dw_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_TCP, + sizeof(ice_fdir_tcp4_gtpu4_eh_dw_gre4_pkt), + ice_fdir_tcp4_gtpu4_eh_dw_gre4_pkt, + sizeof(ice_fdir_tcp4_gtpu4_eh_dw_gre4_pkt), + ice_fdir_tcp4_gtpu4_eh_dw_gre4_pkt, + }, + /* IPV4 GRE IPV4 GTPU DW IPV6 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6, + sizeof(ice_fdir_ipv6_gtpu4_eh_dw_gre4_pkt), + ice_fdir_ipv6_gtpu4_eh_dw_gre4_pkt, + sizeof(ice_fdir_ipv6_gtpu4_eh_dw_gre4_pkt), + ice_fdir_ipv6_gtpu4_eh_dw_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_UDP, + sizeof(ice_fdir_udp6_gtpu4_eh_dw_gre4_pkt), + ice_fdir_udp6_gtpu4_eh_dw_gre4_pkt, + sizeof(ice_fdir_udp6_gtpu4_eh_dw_gre4_pkt), + ice_fdir_udp6_gtpu4_eh_dw_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_TCP, + sizeof(ice_fdir_tcp6_gtpu4_eh_dw_gre4_pkt), + ice_fdir_tcp6_gtpu4_eh_dw_gre4_pkt, + sizeof(ice_fdir_tcp6_gtpu4_eh_dw_gre4_pkt), + ice_fdir_tcp6_gtpu4_eh_dw_gre4_pkt, + }, + /* IPV6 GRE IPV4 GTPU DW IPV4 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4, + sizeof(ice_fdir_ipv4_gtpu4_eh_dw_gre6_pkt), + ice_fdir_ipv4_gtpu4_eh_dw_gre6_pkt, + sizeof(ice_fdir_ipv4_gtpu4_eh_dw_gre6_pkt), + ice_fdir_ipv4_gtpu4_eh_dw_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_UDP, + sizeof(ice_fdir_udp4_gtpu4_eh_dw_gre6_pkt), + ice_fdir_udp4_gtpu4_eh_dw_gre6_pkt, + sizeof(ice_fdir_udp4_gtpu4_eh_dw_gre6_pkt), + ice_fdir_udp4_gtpu4_eh_dw_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_TCP, + sizeof(ice_fdir_tcp4_gtpu4_eh_dw_gre6_pkt), + ice_fdir_tcp4_gtpu4_eh_dw_gre6_pkt, + sizeof(ice_fdir_tcp4_gtpu4_eh_dw_gre6_pkt), + ice_fdir_tcp4_gtpu4_eh_dw_gre6_pkt, + }, + /* IPV6 GRE IPV4 GTPU DW IPV6 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6, + sizeof(ice_fdir_ipv6_gtpu4_eh_dw_gre6_pkt), + ice_fdir_ipv6_gtpu4_eh_dw_gre6_pkt, + sizeof(ice_fdir_ipv6_gtpu4_eh_dw_gre6_pkt), + ice_fdir_ipv6_gtpu4_eh_dw_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_UDP, + sizeof(ice_fdir_udp6_gtpu4_eh_dw_gre6_pkt), + ice_fdir_udp6_gtpu4_eh_dw_gre6_pkt, + sizeof(ice_fdir_udp6_gtpu4_eh_dw_gre6_pkt), + ice_fdir_udp6_gtpu4_eh_dw_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_TCP, + 
sizeof(ice_fdir_tcp6_gtpu4_eh_dw_gre6_pkt), + ice_fdir_tcp6_gtpu4_eh_dw_gre6_pkt, + sizeof(ice_fdir_tcp6_gtpu4_eh_dw_gre6_pkt), + ice_fdir_tcp6_gtpu4_eh_dw_gre6_pkt, + }, + /* IPV4 GRE IPV4 GTPU UP IPV4 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4, + sizeof(ice_fdir_ipv4_gtpu4_eh_up_gre4_pkt), + ice_fdir_ipv4_gtpu4_eh_up_gre4_pkt, + sizeof(ice_fdir_ipv4_gtpu4_eh_up_gre4_pkt), + ice_fdir_ipv4_gtpu4_eh_up_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_UDP, + sizeof(ice_fdir_udp4_gtpu4_eh_up_gre4_pkt), + ice_fdir_udp4_gtpu4_eh_up_gre4_pkt, + sizeof(ice_fdir_udp4_gtpu4_eh_up_gre4_pkt), + ice_fdir_udp4_gtpu4_eh_up_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_TCP, + sizeof(ice_fdir_tcp4_gtpu4_eh_up_gre4_pkt), + ice_fdir_tcp4_gtpu4_eh_up_gre4_pkt, + sizeof(ice_fdir_tcp4_gtpu4_eh_up_gre4_pkt), + ice_fdir_tcp4_gtpu4_eh_up_gre4_pkt, + }, + /* IPV4 GRE IPV4 GTPU UP IPV6 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6, + sizeof(ice_fdir_ipv6_gtpu4_eh_up_gre4_pkt), + ice_fdir_ipv6_gtpu4_eh_up_gre4_pkt, + sizeof(ice_fdir_ipv6_gtpu4_eh_up_gre4_pkt), + ice_fdir_ipv6_gtpu4_eh_up_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_UDP, + sizeof(ice_fdir_udp6_gtpu4_eh_up_gre4_pkt), + ice_fdir_udp6_gtpu4_eh_up_gre4_pkt, + sizeof(ice_fdir_udp6_gtpu4_eh_up_gre4_pkt), + ice_fdir_udp6_gtpu4_eh_up_gre4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_TCP, + sizeof(ice_fdir_tcp6_gtpu4_eh_up_gre4_pkt), + ice_fdir_tcp6_gtpu4_eh_up_gre4_pkt, + sizeof(ice_fdir_tcp6_gtpu4_eh_up_gre4_pkt), + ice_fdir_tcp6_gtpu4_eh_up_gre4_pkt, + }, + /* IPV6 GRE IPV4 GTPU UP IPV4 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4, + sizeof(ice_fdir_ipv4_gtpu4_eh_up_gre6_pkt), + ice_fdir_ipv4_gtpu4_eh_up_gre6_pkt, + sizeof(ice_fdir_ipv4_gtpu4_eh_up_gre6_pkt), + ice_fdir_ipv4_gtpu4_eh_up_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_UDP, + sizeof(ice_fdir_udp4_gtpu4_eh_up_gre6_pkt), + ice_fdir_udp4_gtpu4_eh_up_gre6_pkt, + sizeof(ice_fdir_udp4_gtpu4_eh_up_gre6_pkt), + ice_fdir_udp4_gtpu4_eh_up_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_TCP, + sizeof(ice_fdir_tcp4_gtpu4_eh_up_gre6_pkt), + ice_fdir_tcp4_gtpu4_eh_up_gre6_pkt, + sizeof(ice_fdir_tcp4_gtpu4_eh_up_gre6_pkt), + ice_fdir_tcp4_gtpu4_eh_up_gre6_pkt, + }, + /* IPV6 GRE IPV4 GTPU UP IPV6 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6, + sizeof(ice_fdir_ipv6_gtpu4_eh_up_gre6_pkt), + ice_fdir_ipv6_gtpu4_eh_up_gre6_pkt, + sizeof(ice_fdir_ipv6_gtpu4_eh_up_gre6_pkt), + ice_fdir_ipv6_gtpu4_eh_up_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_UDP, + sizeof(ice_fdir_udp6_gtpu4_eh_up_gre6_pkt), + ice_fdir_udp6_gtpu4_eh_up_gre6_pkt, + sizeof(ice_fdir_udp6_gtpu4_eh_up_gre6_pkt), + ice_fdir_udp6_gtpu4_eh_up_gre6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_TCP, + sizeof(ice_fdir_tcp6_gtpu4_eh_up_gre6_pkt), + ice_fdir_tcp6_gtpu4_eh_up_gre6_pkt, + sizeof(ice_fdir_tcp6_gtpu4_eh_up_gre6_pkt), + ice_fdir_tcp6_gtpu4_eh_up_gre6_pkt, + }, + /* IPV4 L2TPV2 CONTROL */ + { + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL, + sizeof(ice_fdir_ipv4_l2tpv2_ctrl_pkt), + ice_fdir_ipv4_l2tpv2_ctrl_pkt, + sizeof(ice_fdir_ipv4_l2tpv2_ctrl_pkt), + ice_fdir_ipv4_l2tpv2_ctrl_pkt, + }, + /* IPV4 L2TPV2 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2, + sizeof(ice_fdir_ipv4_l2tpv2_pkt), + ice_fdir_ipv4_l2tpv2_pkt, + sizeof(ice_fdir_ipv4_l2tpv2_pkt), + ice_fdir_ipv4_l2tpv2_pkt, + }, + /* IPV4 L2TPV2 PPP */ + { + 
ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP, + sizeof(ice_fdir_ipv4_l2tpv2_ppp_pkt), + ice_fdir_ipv4_l2tpv2_ppp_pkt, + sizeof(ice_fdir_ipv4_l2tpv2_ppp_pkt), + ice_fdir_ipv4_l2tpv2_ppp_pkt, + }, + /* IPV4 L2TPV2 PPP IPV4 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4, + sizeof(ice_fdir_ipv4_l2tpv2_ppp4_pkt), + ice_fdir_ipv4_l2tpv2_ppp4_pkt, + sizeof(ice_fdir_ipv4_l2tpv2_ppp4_pkt), + ice_fdir_ipv4_l2tpv2_ppp4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP, + sizeof(ice_fdir_udp4_l2tpv2_ppp4_pkt), + ice_fdir_udp4_l2tpv2_ppp4_pkt, + sizeof(ice_fdir_udp4_l2tpv2_ppp4_pkt), + ice_fdir_udp4_l2tpv2_ppp4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP, + sizeof(ice_fdir_tcp4_l2tpv2_ppp4_pkt), + ice_fdir_tcp4_l2tpv2_ppp4_pkt, + sizeof(ice_fdir_tcp4_l2tpv2_ppp4_pkt), + ice_fdir_tcp4_l2tpv2_ppp4_pkt, + }, + /* IPV4 L2TPV2 PPP IPV6 */ + { + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6, + sizeof(ice_fdir_ipv6_l2tpv2_ppp4_pkt), + ice_fdir_ipv6_l2tpv2_ppp4_pkt, + sizeof(ice_fdir_ipv6_l2tpv2_ppp4_pkt), + ice_fdir_ipv6_l2tpv2_ppp4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP, + sizeof(ice_fdir_udp6_l2tpv2_ppp4_pkt), + ice_fdir_udp6_l2tpv2_ppp4_pkt, + sizeof(ice_fdir_udp6_l2tpv2_ppp4_pkt), + ice_fdir_udp6_l2tpv2_ppp4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP, + sizeof(ice_fdir_tcp6_l2tpv2_ppp4_pkt), + ice_fdir_tcp6_l2tpv2_ppp4_pkt, + sizeof(ice_fdir_tcp6_l2tpv2_ppp4_pkt), + ice_fdir_tcp6_l2tpv2_ppp4_pkt, + }, + /* IPV6 L2TPV2 CONTROL */ + { + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL, + sizeof(ice_fdir_ipv6_l2tpv2_ctrl_pkt), + ice_fdir_ipv6_l2tpv2_ctrl_pkt, + sizeof(ice_fdir_ipv6_l2tpv2_ctrl_pkt), + ice_fdir_ipv6_l2tpv2_ctrl_pkt, + }, + /* IPV6 L2TPV2 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2, + sizeof(ice_fdir_ipv6_l2tpv2_pkt), + ice_fdir_ipv6_l2tpv2_pkt, + sizeof(ice_fdir_ipv6_l2tpv2_pkt), + ice_fdir_ipv6_l2tpv2_pkt, + }, + /* IPV6 L2TPV2 PPP */ + { + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP, + sizeof(ice_fdir_ipv6_l2tpv2_ppp_pkt), + ice_fdir_ipv6_l2tpv2_ppp_pkt, + sizeof(ice_fdir_ipv6_l2tpv2_ppp_pkt), + ice_fdir_ipv6_l2tpv2_ppp_pkt, + }, + /* IPV6 L2TPV2 PPP IPV4 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4, + sizeof(ice_fdir_ipv4_l2tpv2_ppp6_pkt), + ice_fdir_ipv4_l2tpv2_ppp6_pkt, + sizeof(ice_fdir_ipv4_l2tpv2_ppp6_pkt), + ice_fdir_ipv4_l2tpv2_ppp6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP, + sizeof(ice_fdir_udp4_l2tpv2_ppp6_pkt), + ice_fdir_udp4_l2tpv2_ppp6_pkt, + sizeof(ice_fdir_udp4_l2tpv2_ppp6_pkt), + ice_fdir_udp4_l2tpv2_ppp6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP, + sizeof(ice_fdir_tcp4_l2tpv2_ppp6_pkt), + ice_fdir_tcp4_l2tpv2_ppp6_pkt, + sizeof(ice_fdir_tcp4_l2tpv2_ppp6_pkt), + ice_fdir_tcp4_l2tpv2_ppp6_pkt, + }, + /* IPV6 L2TPV2 PPP IPV6 */ + { + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6, + sizeof(ice_fdir_ipv6_l2tpv2_ppp6_pkt), + ice_fdir_ipv6_l2tpv2_ppp6_pkt, + sizeof(ice_fdir_ipv6_l2tpv2_ppp6_pkt), + ice_fdir_ipv6_l2tpv2_ppp6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP, + sizeof(ice_fdir_udp6_l2tpv2_ppp6_pkt), + ice_fdir_udp6_l2tpv2_ppp6_pkt, + sizeof(ice_fdir_udp6_l2tpv2_ppp6_pkt), + ice_fdir_udp6_l2tpv2_ppp6_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP, + sizeof(ice_fdir_tcp6_l2tpv2_ppp6_pkt), + ice_fdir_tcp6_l2tpv2_ppp6_pkt, + sizeof(ice_fdir_tcp6_l2tpv2_ppp6_pkt), + ice_fdir_tcp6_l2tpv2_ppp6_pkt, + }, { ICE_FLTR_PTYPE_NONF_IPV6_TCP, sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt, @@ -1205,7 +3502,7 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, 
* @hw: pointer to the hardware structure * @cntr_id: returns counter index */ -enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) +int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); @@ -1216,7 +3513,7 @@ enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) * @hw: pointer to the hardware structure * @cntr_id: counter index to be freed */ -enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) +int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) { return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); @@ -1228,7 +3525,7 @@ enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) * @cntr_id: returns counter index * @num_fltr: number of filter entries to be allocated */ -enum ice_status +int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES, @@ -1242,7 +3539,7 @@ ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) * @cntr_id: counter index that needs to be freed * @num_fltr: number of filters to be freed */ -enum ice_status +int ice_free_fd_guar_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr) { return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES, @@ -1256,7 +3553,7 @@ ice_free_fd_guar_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr) * @cntr_id: returns counter index * @num_fltr: number of filter entries to be allocated */ -enum ice_status +int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES, @@ -1270,7 +3567,7 @@ ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) * @cntr_id: counter index that needs to be freed * @num_fltr: number of filters to be freed */ -enum ice_status +int ice_free_fd_shrd_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr) { return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES, @@ -1393,7 +3690,7 @@ static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr) * * returns an open tunnel port specified for this flow type */ -static enum ice_status +static int ice_fdir_get_open_tunnel_port(struct ice_hw *hw, enum ice_fltr_ptype flow, u16 *port) { @@ -1401,17 +3698,109 @@ ice_fdir_get_open_tunnel_port(struct ice_hw *hw, enum ice_fltr_ptype flow, case ICE_FLTR_PTYPE_NONF_IPV4_UDP_ECPRI_TP0: /* eCPRI tunnel */ if (!ice_get_open_tunnel_port(hw, TNL_ECPRI, port)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; break; default: if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, port) && !ice_get_open_tunnel_port(hw, TNL_GENEVE, port)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } return 0; } +/** + * ice_fdir_gen_l2tpv2_pkt - generate L2TPv2 training packet + * @pkt: pointer to return filter packet + * @l2tpv2_data: pointer to ice_fdir_l2tpv2 data structure + * @idx: the matched packet index of FDIR training packet table + * @offset: position of end byte for PPPoL2TPv2 packet + * @tun: true implies generate a tunnel packet + */ +static u16 +ice_fdir_gen_l2tpv2_pkt(u8 *pkt, struct ice_fdir_l2tpv2 *l2tpv2_data, + u16 idx, u16 offset, bool tun) +{ + u16 flags_version; + u16 offset_size; + u16 pos; + + /* get outer packet end pos, 10 = l2tpv2 default len 6 + ppp len 4 */ + pos = offset - ICE_L2TPV2_PKT_LENGTH - ICE_PPP_PKT_LENGTH; + + /* copy outer packet */ + memcpy(pkt, 
ice_fdir_pkt[idx].tun_pkt, pos); + + /* copy l2tpv2 packet common header */ + memcpy(pkt + pos, &l2tpv2_data->flags_version, + sizeof(l2tpv2_data->flags_version)); + pos += sizeof(l2tpv2_data->flags_version); + + flags_version = be16_to_cpu(l2tpv2_data->flags_version); + if (flags_version == 0) { + l2tpv2_data->flags_version = cpu_to_be16(ICE_L2TPV2_FLAGS_VER); + flags_version = ICE_L2TPV2_FLAGS_VER; + } + + /* copy l2tpv2 length */ + if (flags_version & ICE_L2TPV2_FLAGS_LEN) { + memcpy(pkt + pos, &l2tpv2_data->length, + sizeof(l2tpv2_data->length)); + pos += sizeof(l2tpv2_data->length); + } + + /* copy l2tpv2 tunnel id */ + memcpy(pkt + pos, &l2tpv2_data->tunnel_id, + sizeof(l2tpv2_data->tunnel_id)); + pos += sizeof(l2tpv2_data->tunnel_id); + + /* copy l2tpv2 session id */ + memcpy(pkt + pos, &l2tpv2_data->session_id, + sizeof(l2tpv2_data->session_id)); + pos += sizeof(l2tpv2_data->session_id); + + /* copy l2tpv2 ns + nr */ + if (flags_version & ICE_L2TPV2_FLAGS_SEQ) { + memcpy(pkt + pos, &l2tpv2_data->ns, sizeof(l2tpv2_data->ns)); + pos += sizeof(l2tpv2_data->ns); + + memcpy(pkt + pos, &l2tpv2_data->nr, sizeof(l2tpv2_data->nr)); + pos += sizeof(l2tpv2_data->nr); + } + + /* copy l2tpv2 offset size + offset padding */ + if (flags_version & ICE_L2TPV2_FLAGS_OFF) { + memcpy(pkt + pos, &l2tpv2_data->offset_size, + sizeof(l2tpv2_data->offset_size)); + pos += sizeof(l2tpv2_data->offset_size); + /* insert 0 into offset padding */ + offset_size = be16_to_cpu(l2tpv2_data->offset_size); + if (offset_size > ICE_FDIR_MAX_RAW_PKT_SIZE - + ice_fdir_pkt[idx].tun_pkt_len) { + offset_size = ICE_FDIR_MAX_RAW_PKT_SIZE - + ice_fdir_pkt[idx].tun_pkt_len; + } + memset(pkt + pos, 0, offset_size); + pos += offset_size; + } + + if (ice_fdir_pkt[idx].tun_pkt_len > offset) { + /* copy ppp packet */ + memcpy(pkt + pos, + ice_fdir_pkt[idx].tun_pkt + offset - ICE_PPP_PKT_LENGTH, + ICE_PPP_PKT_LENGTH); + pos += ICE_PPP_PKT_LENGTH; + + /* copy inner packets */ + if (tun) { + memcpy(pkt + pos, ice_fdir_pkt[idx].tun_pkt + offset, + ice_fdir_pkt[idx].tun_pkt_len - offset); + } + } + + return pos; +} + /** * ice_fdir_get_gen_prgm_pkt - generate a training packet * @hw: pointer to the hardware structure @@ -1420,7 +3809,7 @@ ice_fdir_get_open_tunnel_port(struct ice_hw *hw, enum ice_fltr_ptype flow, * @frag: generate a fragment packet * @tun: true implies generate a tunnel packet */ -enum ice_status +int ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, u8 *pkt, bool frag, bool tun) { @@ -1428,6 +3817,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, u16 tnl_port; u8 *loc; u16 idx; + u16 flags_version; + u16 pos; + u16 offset; if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) { switch (input->ip.v4.proto) { @@ -1467,13 +3859,33 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, if (ice_fdir_pkt[idx].flow == flow) break; if (idx == ICE_FDIR_NUM_PKT) - return ICE_ERR_PARAM; + return -EINVAL; + if (!tun) { - memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len); + switch (flow) { + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP: + offset = ICE_FDIR_IPV4_L2TPV2_PPP_PKT_OFF; + ice_fdir_gen_l2tpv2_pkt(pkt, &input->l2tpv2_data, + idx, offset, tun); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP: + offset = ICE_FDIR_IPV6_L2TPV2_PPP_PKT_OFF; + ice_fdir_gen_l2tpv2_pkt(pkt, 
&input->l2tpv2_data, + idx, offset, tun); + break; + default: + memcpy(pkt, ice_fdir_pkt[idx].pkt, + ice_fdir_pkt[idx].pkt_len); + break; + } loc = pkt; } else { if (!ice_fdir_pkt[idx].tun_pkt) - return ICE_ERR_PARAM; + return -EINVAL; switch (flow) { case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4: @@ -1489,19 +3901,134 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_UDP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_TCP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_UDP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_TCP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_UDP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_TCP: memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, ice_fdir_pkt[idx].tun_pkt_len); loc = &pkt[ICE_FDIR_GTPU_EH_INNER_PKT_OFF]; break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_TCP: + memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, + ice_fdir_pkt[idx].tun_pkt_len); + loc = &pkt[ICE_FDIR_IPV4_GRE_INNER_PKT_OFF]; + break; + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_TCP: + memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, + ice_fdir_pkt[idx].tun_pkt_len); + loc = &pkt[ICE_FDIR_IPV6_GRE_INNER_PKT_OFF]; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6_TCP: + memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, + ice_fdir_pkt[idx].tun_pkt_len); + loc = &pkt[ICE_FDIR_V4_V4_GTPOGRE_PKT_OFF]; + break; + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6_TCP: + memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, + ice_fdir_pkt[idx].tun_pkt_len); + loc = &pkt[ICE_FDIR_V6_V4_GTPOGRE_PKT_OFF]; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4: + case 
ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_TCP: + memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, + ice_fdir_pkt[idx].tun_pkt_len); + loc = &pkt[ICE_FDIR_V4_V4_GTPOGRE_EH_PKT_OFF]; + break; + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_TCP: + memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, + ice_fdir_pkt[idx].tun_pkt_len); + loc = &pkt[ICE_FDIR_V6_V4_GTPOGRE_EH_PKT_OFF]; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP: + offset = ICE_FDIR_IPV4_L2TPV2_PPP_PKT_OFF; + pos = ice_fdir_gen_l2tpv2_pkt(pkt, &input->l2tpv2_data, + idx, offset, tun); + loc = &pkt[pos]; + break; + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP: + offset = ICE_FDIR_IPV6_L2TPV2_PPP_PKT_OFF; + pos = ice_fdir_gen_l2tpv2_pkt(pkt, &input->l2tpv2_data, + idx, offset, tun); + loc = &pkt[pos]; + break; default: if (ice_fdir_get_open_tunnel_port(hw, flow, &tnl_port)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, ice_fdir_pkt[idx].tun_pkt_len); @@ -1579,6 +4106,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN: + case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP: ice_pkt_insert_mac_addr(pkt, input->ext_data_outer.dst_mac); ice_pkt_insert_mac_addr(pkt + ETH_ALEN, input->ext_data_outer.src_mac); ice_pkt_insert_u32(pkt, ICE_IPV4_SRC_ADDR_OFFSET, @@ 
-1601,6 +4129,84 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); ice_pkt_insert_mac_addr(loc + ETH_ALEN, input->ext_data.src_mac); break; + case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP: + ice_pkt_insert_mac_addr(pkt, input->ext_data_outer.dst_mac); + ice_pkt_insert_mac_addr(pkt + ETH_ALEN, + input->ext_data_outer.src_mac); + ice_pkt_insert_u32(pkt, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip_outer.v4.dst_ip); + ice_pkt_insert_u32(pkt, ICE_IPV4_DST_ADDR_OFFSET, + input->ip_outer.v4.src_ip); + ice_pkt_insert_u8(pkt, ICE_IPV4_TOS_OFFSET, + input->ip_outer.v4.tos); + ice_pkt_insert_u32(pkt, ICE_IPV4_VXLAN_VNI_OFFSET, + input->vxlan_data.vni); + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u16(loc, ICE_IPV4_TCP_DST_PORT_OFFSET, + input->ip.v4.src_port); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET, + input->ip.v4.dst_port); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, + input->ext_data.src_mac); + if (frag) + loc[20] = ICE_FDIR_IPV4_PKT_FLAG_MF; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP: + ice_pkt_insert_mac_addr(pkt, input->ext_data_outer.dst_mac); + ice_pkt_insert_mac_addr(pkt + ETH_ALEN, + input->ext_data_outer.src_mac); + ice_pkt_insert_u32(pkt, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip_outer.v4.dst_ip); + ice_pkt_insert_u32(pkt, ICE_IPV4_DST_ADDR_OFFSET, + input->ip_outer.v4.src_ip); + ice_pkt_insert_u8(pkt, ICE_IPV4_TOS_OFFSET, + input->ip_outer.v4.tos); + ice_pkt_insert_u32(pkt, ICE_IPV4_VXLAN_VNI_OFFSET, + input->vxlan_data.vni); + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_DST_PORT_OFFSET, + input->ip.v4.src_port); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET, + input->ip.v4.dst_port); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, + input->ext_data.src_mac); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER: + ice_pkt_insert_mac_addr(pkt, input->ext_data_outer.dst_mac); + ice_pkt_insert_mac_addr(pkt + ETH_ALEN, + input->ext_data_outer.src_mac); + ice_pkt_insert_u32(pkt, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip_outer.v4.dst_ip); + ice_pkt_insert_u32(pkt, ICE_IPV4_DST_ADDR_OFFSET, + input->ip_outer.v4.src_ip); + ice_pkt_insert_u8(pkt, ICE_IPV4_TOS_OFFSET, + input->ip_outer.v4.tos); + ice_pkt_insert_u32(pkt, ICE_IPV4_VXLAN_VNI_OFFSET, + input->vxlan_data.vni); + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_u8(loc, ICE_IPV4_PROTO_OFFSET, + input->ip.v4.proto); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, + input->ext_data.src_mac); + break; case ICE_FLTR_PTYPE_NONF_IPV4_GTPU: ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, 
input->ip.v4.src_ip); @@ -1613,6 +4219,14 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4: ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_DST_ADDR_OFFSET, input->ip.v4.src_ip); ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_SRC_ADDR_OFFSET, @@ -1634,10 +4248,38 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_u6_qfi(loc, ICE_IPV4_GTPU_QFI_OFFSET, input->gtpu_data.qfi); break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU: + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_GTPOGRE_TEID_OFFSET, + input->gtpu_data.teid); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP: + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_GTPOGRE_TEID_OFFSET, + input->gtpu_data.teid); + ice_pkt_insert_u6_qfi(loc, ICE_IPV4_GTPOGRE_QFI_OFFSET, + input->gtpu_data.qfi); + break; case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_UDP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_UDP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_UDP: ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_DST_ADDR_OFFSET, input->ip.v4.src_ip); ice_pkt_insert_u16(loc, ICE_UDP4_NO_MAC_DST_PORT_OFFSET, @@ -1653,6 +4295,14 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_TCP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_TCP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_TCP: ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_DST_ADDR_OFFSET, input->ip.v4.src_ip); ice_pkt_insert_u16(loc, ICE_TCP4_NO_MAC_DST_PORT_OFFSET, @@ -1665,6 +4315,17 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr 
*input, ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TTL_OFFSET, input->ip.v4.ttl); break; case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6: ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_DST_ADDR_OFFSET, input->ip.v6.src_ip); ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_SRC_ADDR_OFFSET, @@ -1675,6 +4336,17 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v6.proto); break; case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_UDP: ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_DST_ADDR_OFFSET, input->ip.v6.src_ip); ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_SRC_ADDR_OFFSET, @@ -1687,6 +4359,17 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_u8(loc, ICE_IPV6_NO_MAC_HLIM_OFFSET, input->ip.v6.hlim); break; case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_TCP: ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_DST_ADDR_OFFSET, input->ip.v6.src_ip); ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_SRC_ADDR_OFFSET, @@ -1720,6 +4403,26 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_u6_qfi(loc, ICE_IPV6_GTPU_QFI_OFFSET, input->gtpu_data.qfi); break; + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_u32(loc, ICE_IPV6_GTPOGRE_TEID_OFFSET, + input->gtpu_data.teid); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, + input->ip.v6.src_ip); + 
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_u32(loc, ICE_IPV6_GTPOGRE_TEID_OFFSET, + input->gtpu_data.teid); + ice_pkt_insert_u6_qfi(loc, ICE_IPV6_GTPOGRE_QFI_OFFSET, + input->gtpu_data.qfi); + break; case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3: ice_pkt_insert_u32(loc, ICE_IPV4_L2TPV3_SESS_ID_OFFSET, input->l2tpv3_data.session_id); @@ -1729,10 +4432,18 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->l2tpv3_data.session_id); break; case ICE_FLTR_PTYPE_NONF_IPV4_ESP: + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); ice_pkt_insert_u32(loc, ICE_IPV4_ESP_SPI_OFFSET, input->ip.v4.sec_parm_idx); break; case ICE_FLTR_PTYPE_NONF_IPV6_ESP: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, + input->ip.v6.dst_ip); ice_pkt_insert_u32(loc, ICE_IPV6_ESP_SPI_OFFSET, input->ip.v6.sec_parm_idx); break; @@ -1783,6 +4494,212 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_ECPRI_TP0_PC_ID_OFFSET, input->ecpri_data.pc_id); break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4: + ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TOS_OFFSET, + input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_PROTO_OFFSET, + input->ip.v4.proto); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_TCP: + ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u16(loc, ICE_TCP4_NO_MAC_DST_PORT_OFFSET, + input->ip.v4.src_port); + ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u16(loc, ICE_TCP4_NO_MAC_SRC_PORT_OFFSET, + input->ip.v4.dst_port); + ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TOS_OFFSET, + input->ip.v4.tos); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_UDP: + ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u16(loc, ICE_UDP4_NO_MAC_DST_PORT_OFFSET, + input->ip.v4.src_port); + ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u16(loc, ICE_UDP4_NO_MAC_SRC_PORT_OFFSET, + input->ip.v4.dst_port); + ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TOS_OFFSET, + input->ip.v4.tos); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_DST_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_SRC_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_NO_MAC_TC_OFFSET, + input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_NO_MAC_PROTO_OFFSET, + input->ip.v6.proto); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_TCP: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_DST_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_SRC_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_u16(loc, ICE_TCP6_NO_MAC_DST_PORT_OFFSET, + input->ip.v6.src_port); + ice_pkt_insert_u16(loc, ICE_TCP6_NO_MAC_SRC_PORT_OFFSET, + 
input->ip.v6.dst_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_NO_MAC_TC_OFFSET, + input->ip.v6.tc); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_UDP: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_DST_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_SRC_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_u16(loc, ICE_UDP6_NO_MAC_DST_PORT_OFFSET, + input->ip.v6.src_port); + ice_pkt_insert_u16(loc, ICE_UDP6_NO_MAC_SRC_PORT_OFFSET, + input->ip.v6.dst_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_NO_MAC_TC_OFFSET, + input->ip.v6.tc); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL: + ice_pkt_insert_mac_addr(loc, input->ext_data_outer.dst_mac); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, + input->ext_data_outer.src_mac); + ice_pkt_insert_u16(loc, ICE_IPV4_L2TPV2_LEN_SESS_ID_OFFSET, + input->l2tpv2_data.session_id); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP: + ice_pkt_insert_mac_addr(loc, input->ext_data_outer.dst_mac); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, + input->ext_data_outer.src_mac); + flags_version = be16_to_cpu(input->l2tpv2_data.flags_version); + if (flags_version & ICE_L2TPV2_FLAGS_LEN) { + ice_pkt_insert_u16(loc, + ICE_IPV4_L2TPV2_LEN_SESS_ID_OFFSET, + input->l2tpv2_data.session_id); + } else { + ice_pkt_insert_u16(loc, + ICE_IPV4_L2TPV2_SESS_ID_OFFSET, + input->l2tpv2_data.session_id); + } + break; + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL: + ice_pkt_insert_mac_addr(loc, input->ext_data_outer.dst_mac); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, + input->ext_data_outer.src_mac); + ice_pkt_insert_u16(loc, ICE_IPV6_L2TPV2_LEN_SESS_ID_OFFSET, + input->l2tpv2_data.session_id); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP: + ice_pkt_insert_mac_addr(loc, input->ext_data_outer.dst_mac); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, + input->ext_data_outer.src_mac); + flags_version = be16_to_cpu(input->l2tpv2_data.flags_version); + if (flags_version & ICE_L2TPV2_FLAGS_LEN) { + ice_pkt_insert_u16(loc, + ICE_IPV6_L2TPV2_LEN_SESS_ID_OFFSET, + input->l2tpv2_data.session_id); + } else { + ice_pkt_insert_u16(loc, + ICE_IPV6_L2TPV2_SESS_ID_OFFSET, + input->l2tpv2_data.session_id); + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4: + ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TOS_OFFSET, + input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TTL_OFFSET, + input->ip.v4.ttl); + ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_PROTO_OFFSET, + input->ip.v4.proto); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP: + ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u16(loc, ICE_UDP4_NO_MAC_DST_PORT_OFFSET, + input->ip.v4.src_port); + ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u16(loc, ICE_UDP4_NO_MAC_SRC_PORT_OFFSET, + input->ip.v4.dst_port); + ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TOS_OFFSET, + input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TTL_OFFSET, + input->ip.v4.ttl); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP: + ice_pkt_insert_u32(loc, 
ICE_IPV4_NO_MAC_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u16(loc, ICE_TCP4_NO_MAC_DST_PORT_OFFSET, + input->ip.v4.src_port); + ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u16(loc, ICE_TCP4_NO_MAC_SRC_PORT_OFFSET, + input->ip.v4.dst_port); + ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TOS_OFFSET, + input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TTL_OFFSET, + input->ip.v4.ttl); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_DST_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_SRC_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_NO_MAC_TC_OFFSET, + input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_NO_MAC_HLIM_OFFSET, + input->ip.v6.hlim); + ice_pkt_insert_u8(loc, ICE_IPV6_NO_MAC_PROTO_OFFSET, + input->ip.v6.proto); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_DST_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_SRC_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_u16(loc, ICE_UDP6_NO_MAC_DST_PORT_OFFSET, + input->ip.v6.src_port); + ice_pkt_insert_u16(loc, ICE_UDP6_NO_MAC_SRC_PORT_OFFSET, + input->ip.v6.dst_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_NO_MAC_TC_OFFSET, + input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_NO_MAC_HLIM_OFFSET, + input->ip.v6.hlim); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_DST_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_SRC_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_u16(loc, ICE_TCP6_NO_MAC_DST_PORT_OFFSET, + input->ip.v6.src_port); + ice_pkt_insert_u16(loc, ICE_TCP6_NO_MAC_SRC_PORT_OFFSET, + input->ip.v6.dst_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_NO_MAC_TC_OFFSET, + input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_NO_MAC_HLIM_OFFSET, + input->ip.v6.hlim); + break; case ICE_FLTR_PTYPE_NONF_IPV6_TCP: ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, input->ip.v6.src_ip); @@ -1847,11 +4764,18 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; case ICE_FLTR_PTYPE_FRAG_IPV6: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); ice_pkt_insert_u32(loc, ICE_IPV6_ID_OFFSET, input->ip.v6.packet_id); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } if (input->flex_fltr) @@ -1866,7 +4790,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, * @pkt: pointer to return filter packet * @frag: generate a fragment packet */ -enum ice_status +int ice_fdir_get_prgm_pkt(struct ice_fdir_fltr *input, u8 *pkt, bool frag) { return ice_fdir_get_gen_prgm_pkt(NULL, input, pkt, frag, false); @@ -1957,70 +4881,62 @@ ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, } /** - * ice_cmp_ipv6_addr - compare 2 IP v6 addresses - * @a: IP v6 address - * @b: IP v6 address - * - * 
Returns 0 on equal, returns non-0 if different - */ -static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b) -{ - return memcmp(a, b, 4 * sizeof(__be32)); -} - -/** - * ice_fdir_comp_rules - compare 2 filters + * ice_fdir_comp_rules_basic - compare 2 filters * @a: a Flow Director filter data structure * @b: a Flow Director filter data structure - * @v6: bool true if v6 filter * * Returns true if the filters match */ -static bool -ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6) +bool +ice_fdir_comp_rules_basic(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b) { - enum ice_fltr_ptype flow_type = a->flow_type; + if (a->flow_type != b->flow_type) + return false; + if (memcmp(&a->ip, &b->ip, sizeof(a->ip))) + return false; + if (memcmp(&a->mask, &b->mask, sizeof(a->mask))) + return false; - /* The calling function already checks that the two filters have the - * same flow_type. - */ - if (!v6) { - if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) { - if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && - a->ip.v4.src_ip == b->ip.v4.src_ip && - a->ip.v4.dst_port == b->ip.v4.dst_port && - a->ip.v4.src_port == b->ip.v4.src_port) - return true; - } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) { - if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && - a->ip.v4.src_ip == b->ip.v4.src_ip && - a->ip.v4.l4_header == b->ip.v4.l4_header && - a->ip.v4.proto == b->ip.v4.proto && - a->ip.v4.ip_ver == b->ip.v4.ip_ver && - a->ip.v4.tos == b->ip.v4.tos) - return true; - } - } else { - if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) { - if (a->ip.v6.dst_port == b->ip.v6.dst_port && - a->ip.v6.src_port == b->ip.v6.src_port && - !ice_cmp_ipv6_addr(a->ip.v6.dst_ip, - b->ip.v6.dst_ip) && - !ice_cmp_ipv6_addr(a->ip.v6.src_ip, - b->ip.v6.src_ip)) - return true; - } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) { - if (a->ip.v6.dst_port == b->ip.v6.dst_port && - a->ip.v6.src_port == b->ip.v6.src_port) - return true; - } - } + return true; +} - return false; +/** + * ice_fdir_comp_rules_extended - compare 2 filters + * @a: a Flow Director filter data structure + * @b: a Flow Director filter data structure + * + * Returns true if the filters match + */ +bool +ice_fdir_comp_rules_extended(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b) +{ + if (!ice_fdir_comp_rules_basic(a, b)) + return false; + + if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data))) + return false; + if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask))) + return false; + if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data))) + return false; + if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask))) + return false; + if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data))) + return false; + if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask))) + return false; + if (memcmp(&a->ecpri_data, &b->ecpri_data, sizeof(a->ecpri_data))) + return false; + if (memcmp(&a->ecpri_mask, &b->ecpri_mask, sizeof(a->ecpri_mask))) + return false; + if (memcmp(&a->l2tpv2_data.session_id, &b->l2tpv2_data.session_id, + sizeof(a->l2tpv2_data.session_id))) + return false; + if (memcmp(&a->l2tpv2_mask.session_id, &b->l2tpv2_mask.session_id, + sizeof(a->l2tpv2_mask.session_id))) + return false; + + return true; } /** @@ -2036,19 +4952,8 @@ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input) bool 
ret = false; list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) { - enum ice_fltr_ptype flow_type; + ret = ice_fdir_comp_rules_basic(rule, input); - if (rule->flow_type != input->flow_type) - continue; - - flow_type = input->flow_type; - if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) - ret = ice_fdir_comp_rules(rule, input, false); - else - ret = ice_fdir_comp_rules(rule, input, true); if (ret) { if (rule->fltr_id == input->fltr_id && rule->q_index != input->q_index) @@ -2069,7 +4974,7 @@ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input) * Clears FD table entries by issuing admin command (direct, 0x0B06) * Must to pass valid vsi_num as returned by "AddVSI". */ -enum ice_status ice_clear_vsi_fd_table(struct ice_hw *hw, u16 vsi_num) +int ice_clear_vsi_fd_table(struct ice_hw *hw, u16 vsi_num) { struct ice_aqc_clear_fd_table *cmd; struct ice_aq_desc desc; @@ -2088,7 +4993,7 @@ enum ice_status ice_clear_vsi_fd_table(struct ice_hw *hw, u16 vsi_num) * * Clears FD table entries for a PF by issuing admin command (direct, 0x0B06) */ -enum ice_status ice_clear_pf_fd_table(struct ice_hw *hw) +int ice_clear_pf_fd_table(struct ice_hw *hw) { struct ice_aqc_clear_fd_table *cmd; struct ice_aq_desc desc; diff --git a/drivers/thirdparty/ice/ice_fdir.h b/drivers/thirdparty/ice/ice_fdir.h index 7bdf45458fbb..59036b03cb32 100644 --- a/drivers/thirdparty/ice/ice_fdir.h +++ b/drivers/thirdparty/ice/ice_fdir.h @@ -4,11 +4,22 @@ #ifndef _ICE_FDIR_H_ #define _ICE_FDIR_H_ -#include "ice_common.h" - +#include "ice_type.h" #define ICE_FDIR_GTPU_IP_INNER_PKT_OFF 50 #define ICE_FDIR_GTPU_EH_INNER_PKT_OFF 58 +#define ICE_FDIR_IPV4_GRE_INNER_PKT_OFF 38 +#define ICE_FDIR_IPV6_GRE_INNER_PKT_OFF 58 +#define ICE_FDIR_V4_V4_GTPOGRE_PKT_OFF 74 +#define ICE_FDIR_V4_V6_GTPOGRE_PKT_OFF 94 +#define ICE_FDIR_V6_V4_GTPOGRE_PKT_OFF 94 +#define ICE_FDIR_V6_V6_GTPOGRE_PKT_OFF 114 +#define ICE_FDIR_V4_V4_GTPOGRE_EH_PKT_OFF 82 +#define ICE_FDIR_V4_V6_GTPOGRE_EH_PKT_OFF 102 +#define ICE_FDIR_V6_V4_GTPOGRE_EH_PKT_OFF 102 +#define ICE_FDIR_V6_V6_GTPOGRE_EH_PKT_OFF 122 +#define ICE_FDIR_IPV4_L2TPV2_PPP_PKT_OFF 52 +#define ICE_FDIR_IPV6_L2TPV2_PPP_PKT_OFF 72 #define ICE_FDIR_TUN_PKT_OFF 50 #define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF) @@ -64,6 +75,10 @@ #define ICE_IPV4_GTPU_QFI_OFFSET 56 #define ICE_IPV6_GTPU_TEID_OFFSET 66 #define ICE_IPV6_GTPU_QFI_OFFSET 76 +#define ICE_IPV4_GTPOGRE_TEID_OFFSET 70 +#define ICE_IPV4_GTPOGRE_QFI_OFFSET 80 +#define ICE_IPV6_GTPOGRE_TEID_OFFSET 90 +#define ICE_IPV6_GTPOGRE_QFI_OFFSET 100 #define ICE_IPV4_L2TPV3_SESS_ID_OFFSET 34 #define ICE_IPV6_L2TPV3_SESS_ID_OFFSET 54 #define ICE_IPV4_ESP_SPI_OFFSET 34 @@ -72,9 +87,13 @@ #define ICE_IPV6_AH_SPI_OFFSET 58 #define ICE_IPV4_NAT_T_ESP_SPI_OFFSET 42 #define ICE_IPV6_NAT_T_ESP_SPI_OFFSET 62 -#define ICE_IPV4_VXLAN_VNI_OFFSET 45 +#define ICE_IPV4_VXLAN_VNI_OFFSET 46 #define ICE_ECPRI_TP0_PC_ID_OFFSET 18 #define ICE_IPV4_UDP_ECPRI_TP0_PC_ID_OFFSET 46 +#define ICE_IPV4_L2TPV2_SESS_ID_OFFSET 46 +#define ICE_IPV6_L2TPV2_SESS_ID_OFFSET 66 +#define ICE_IPV4_L2TPV2_LEN_SESS_ID_OFFSET 48 +#define ICE_IPV6_L2TPV2_LEN_SESS_ID_OFFSET 68 #define ICE_FDIR_MAX_FLTRS 16384 @@ -201,6 +220,16 @@ struct ice_fdir_ecpri { __be16 pc_id; }; +struct ice_fdir_l2tpv2 { + __be16 flags_version; + __be16 length; + __be16 tunnel_id; + __be16 session_id; + __be16 ns; + __be16 nr; + __be16 offset_size; +}; + 
struct ice_fdir_extra { u8 dst_mac[ETH_ALEN]; /* dest MAC address */ u8 src_mac[ETH_ALEN]; /* src MAC address */ @@ -240,6 +269,9 @@ struct ice_fdir_fltr { struct ice_fdir_ecpri ecpri_data; struct ice_fdir_ecpri ecpri_mask; + struct ice_fdir_l2tpv2 l2tpv2_data; + struct ice_fdir_l2tpv2 l2tpv2_mask; + struct ice_fdir_extra ext_data; struct ice_fdir_extra ext_mask; @@ -274,29 +306,34 @@ struct ice_fdir_base_pkt { const u8 *tun_pkt; }; -enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); -enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); +bool +ice_fdir_comp_rules_basic(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b); +bool +ice_fdir_comp_rules_extended(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b); + +int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); +int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); void ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx, struct ice_fltr_desc *fdir_desc); void ice_set_dflt_val_fd_desc(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx); -enum ice_status +int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); -enum ice_status +int ice_free_fd_guar_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr); -enum ice_status +int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); -enum ice_status +int ice_free_fd_shrd_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr); -enum ice_status ice_clear_vsi_fd_table(struct ice_hw *hw, u16 vsi_num); -enum ice_status ice_clear_pf_fd_table(struct ice_hw *hw); +int ice_clear_vsi_fd_table(struct ice_hw *hw, u16 vsi_num); +int ice_clear_pf_fd_table(struct ice_hw *hw); void ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, struct ice_fltr_desc *fdesc, bool add); -enum ice_status +int ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, u8 *pkt, bool frag, bool tun); -enum ice_status +int ice_fdir_get_prgm_pkt(struct ice_fdir_fltr *input, u8 *pkt, bool frag); int ice_get_fdir_cnt_all(struct ice_hw *hw); bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input); diff --git a/drivers/thirdparty/ice/ice_flex_pipe.c b/drivers/thirdparty/ice/ice_flex_pipe.c index b98e4984f7ff..4b3297d7b09e 100644 --- a/drivers/thirdparty/ice/ice_flex_pipe.c +++ b/drivers/thirdparty/ice/ice_flex_pipe.c @@ -2,23 +2,11 @@ /* Copyright (C) 2018-2021, Intel Corporation. */ #include "ice_common.h" +#include "ice_ddp.h" #include "ice_flex_pipe.h" #include "ice_protocol_type.h" #include "ice_flow.h" - - -/* For supporting double VLAN mode, it is necessary to enable or disable certain - * boost tcam entries. The metadata labels names that match the following - * prefixes will be saved to allow enabling double VLAN mode. - */ -#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */ -#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */ - -/* To support tunneling entries by PF, the package will append the PF number to - * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. - */ -#define ICE_TNL_PRE "TNL_" static const struct ice_tunnel_type_scan tnls[] = { { TNL_VXLAN, "TNL_VXLAN_PF" }, { TNL_GENEVE, "TNL_GENEVE_PF" }, @@ -106,443 +94,13 @@ static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect) return ice_sect_lkup[blk][sect]; } -/** - * ice_pkg_val_buf - * @buf: pointer to the ice buffer - * - * This helper function validates a buffer's header. 
- */ -static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) -{ - struct ice_buf_hdr *hdr; - u16 section_count; - u16 data_end; - - hdr = (struct ice_buf_hdr *)buf->buf; - /* verify data */ - section_count = le16_to_cpu(hdr->section_count); - if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) - return NULL; - - data_end = le16_to_cpu(hdr->data_end); - if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) - return NULL; - - return hdr; -} - -/** - * ice_find_buf_table - * @ice_seg: pointer to the ice segment - * - * Returns the address of the buffer table within the ice segment. - */ -static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) -{ - struct ice_nvm_table *nvms; - - nvms = (struct ice_nvm_table *) - (ice_seg->device_table + - le32_to_cpu(ice_seg->device_table_count)); - - return (__force struct ice_buf_table *) - (nvms->vers + le32_to_cpu(nvms->table_count)); -} - -/** - * ice_pkg_enum_buf - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * - * This function will enumerate all the buffers in the ice segment. The first - * call is made with the ice_seg parameter non-NULL; on subsequent calls, - * ice_seg is set to NULL which continues the enumeration. When the function - * returns a NULL pointer, then the end of the buffers has been reached, or an - * unexpected value has been detected (for example an invalid section count or - * an invalid buffer end value). - */ -static struct ice_buf_hdr * -ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) -{ - if (ice_seg) { - state->buf_table = ice_find_buf_table(ice_seg); - if (!state->buf_table) - return NULL; - - state->buf_idx = 0; - return ice_pkg_val_buf(state->buf_table->buf_array); - } - - if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) - return ice_pkg_val_buf(state->buf_table->buf_array + - state->buf_idx); - else - return NULL; -} - -/** - * ice_pkg_advance_sect - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * - * This helper function will advance the section within the ice segment, - * also advancing the buffer if needed. - */ -static bool -ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) -{ - if (!ice_seg && !state->buf) - return false; - - if (!ice_seg && state->buf) - if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) - return true; - - state->buf = ice_pkg_enum_buf(ice_seg, state); - if (!state->buf) - return false; - - /* start of new buffer, reset section index */ - state->sect_idx = 0; - return true; -} - -/** - * ice_pkg_enum_section - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * @sect_type: section type to enumerate - * - * This function will enumerate all the sections of a particular type in the - * ice segment. The first call is made with the ice_seg parameter non-NULL; - * on subsequent calls, ice_seg is set to NULL which continues the enumeration. - * When the function returns a NULL pointer, then the end of the matching - * sections has been reached. 
- */ -static void * -ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, - u32 sect_type) -{ - u16 offset, size; - - if (ice_seg) - state->type = sect_type; - - if (!ice_pkg_advance_sect(ice_seg, state)) - return NULL; - - /* scan for next matching section */ - while (state->buf->section_entry[state->sect_idx].type != - cpu_to_le32(state->type)) - if (!ice_pkg_advance_sect(NULL, state)) - return NULL; - - /* validate section */ - offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); - if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) - return NULL; - - size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size); - if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) - return NULL; - - /* make sure the section fits in the buffer */ - if (offset + size > ICE_PKG_BUF_SIZE) - return NULL; - - state->sect_type = - le32_to_cpu(state->buf->section_entry[state->sect_idx].type); - - /* calc pointer to this section */ - state->sect = ((u8 *)state->buf) + - le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); - - return state->sect; -} - -/** - * ice_pkg_enum_entry - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * @sect_type: section type to enumerate - * @offset: pointer to variable that receives the offset in the table (optional) - * @handler: function that handles access to the entries into the section type - * - * This function will enumerate all the entries in particular section type in - * the ice segment. The first call is made with the ice_seg parameter non-NULL; - * on subsequent calls, ice_seg is set to NULL which continues the enumeration. - * When the function returns a NULL pointer, then the end of the entries has - * been reached. - * - * Since each section may have a different header and entry size, the handler - * function is needed to determine the number and location entries in each - * section. - * - * The offset parameter is optional, but should be used for sections that - * contain an offset for each section table. For such cases, the section handler - * function must return the appropriate offset + index to give the absolution - * offset for each entry. For example, if the base for a section's header - * indicates a base offset of 10, and the index for the entry is 2, then - * section handler function should set the offset to 10 + 2 = 12. 
- */ -static void * -ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, - u32 sect_type, u32 *offset, - void *(*handler)(u32 sect_type, void *section, - u32 index, u32 *offset)) -{ - void *entry; - - if (ice_seg) { - if (!handler) - return NULL; - - if (!ice_pkg_enum_section(ice_seg, state, sect_type)) - return NULL; - - state->entry_idx = 0; - state->handler = handler; - } else { - state->entry_idx++; - } - - if (!state->handler) - return NULL; - - /* get entry */ - entry = state->handler(state->sect_type, state->sect, state->entry_idx, - offset); - if (!entry) { - /* end of a section, look for another section of this type */ - if (!ice_pkg_enum_section(NULL, state, 0)) - return NULL; - - state->entry_idx = 0; - entry = state->handler(state->sect_type, state->sect, - state->entry_idx, offset); - } - - return entry; -} - -/** - * ice_hw_ptype_ena - check if the PTYPE is enabled or not - * @hw: pointer to the HW structure - * @ptype: the hardware PTYPE - */ -bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype) -{ - return ptype < ICE_FLOW_PTYPE_MAX && - test_bit(ptype, hw->hw_ptype); -} - -/** - * ice_marker_ptype_tcam_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the Marker PType TCAM entry to be returned - * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual Marker PType TCAM entries. - */ -static void * -ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index, - u32 *offset) -{ - struct ice_marker_ptype_tcam_section *marker_ptype; - - if (!section) - return NULL; - - if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE) - return NULL; - - if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - marker_ptype = section; - if (index >= le16_to_cpu(marker_ptype->count)) - return NULL; - - return marker_ptype->tcam + index; -} - -/** - * ice_fill_hw_ptype - fill the enabled PTYPE bit information - * @hw: pointer to the HW structure - */ -static void -ice_fill_hw_ptype(struct ice_hw *hw) -{ - struct ice_marker_ptype_tcam_entry *tcam; - struct ice_seg *seg = hw->seg; - struct ice_pkg_enum state; - - bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); - if (!seg) - return; - - memset(&state, 0, sizeof(state)); - - do { - tcam = ice_pkg_enum_entry(seg, &state, - ICE_SID_RXPARSER_MARKER_PTYPE, NULL, - ice_marker_ptype_tcam_handler); - if (tcam && - le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && - le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) - set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); - - seg = NULL; - } while (tcam); -} - -/** - * ice_boost_tcam_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the boost TCAM entry to be returned - * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual boost TCAM entries. 
- */ -static void * -ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ - struct ice_boost_tcam_section *boost; - - if (!section) - return NULL; - - if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) - return NULL; - - if (index > ICE_MAX_BST_TCAMS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - boost = section; - if (index >= le16_to_cpu(boost->count)) - return NULL; - - return boost->tcam + index; -} - -/** - * ice_find_boost_entry - * @ice_seg: pointer to the ice segment (non-NULL) - * @addr: Boost TCAM address of entry to search for - * @entry: returns pointer to the entry - * - * Finds a particular Boost TCAM entry and returns a pointer to that entry - * if it is found. The ice_seg parameter must not be NULL since the first call - * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. - */ -static enum ice_status -ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, - struct ice_boost_tcam_entry **entry) -{ - struct ice_boost_tcam_entry *tcam; - struct ice_pkg_enum state; - - memset(&state, 0, sizeof(state)); - - if (!ice_seg) - return ICE_ERR_PARAM; - - do { - tcam = ice_pkg_enum_entry(ice_seg, &state, - ICE_SID_RXPARSER_BOOST_TCAM, NULL, - ice_boost_tcam_handler); - if (tcam && le16_to_cpu(tcam->addr) == addr) { - *entry = tcam; - return 0; - } - - ice_seg = NULL; - } while (tcam); - - *entry = NULL; - return ICE_ERR_CFG; -} - -/** - * ice_label_enum_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the label entry to be returned - * @offset: pointer to receive absolute offset, always zero for label sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual label entries. - */ -static void * -ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index, - u32 *offset) -{ - struct ice_label_section *labels; - - if (!section) - return NULL; - - if (index > ICE_MAX_LABELS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - labels = section; - if (index >= le16_to_cpu(labels->count)) - return NULL; - - return labels->label + index; -} - -/** - * ice_enum_labels - * @ice_seg: pointer to the ice segment (NULL on subsequent calls) - * @type: the section type that will contain the label (0 on subsequent calls) - * @state: ice_pkg_enum structure that will hold the state of the enumeration - * @value: pointer to a value that will return the label's value if found - * - * Enumerates a list of labels in the package. The caller will call - * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call - * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL - * the end of the list has been reached. 
- */ -static char * -ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, - u16 *value) -{ - struct ice_label *label; - - /* Check for valid label section on first call */ - if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) - return NULL; - - label = ice_pkg_enum_entry(ice_seg, state, type, NULL, - ice_label_enum_handler); - if (!label) - return NULL; - - *value = le16_to_cpu(label->value); - return label->name; -} - /** * ice_add_tunnel_hint * @hw: pointer to the HW structure * @label_name: label text * @val: value of the tunnel port boost entry */ -static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) +void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) { if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { u16 i; @@ -578,7 +136,7 @@ static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) * @val: value of the boost entry * @enable: true if entry needs to be enabled, or false if needs to be disabled */ -static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable) +void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable) { if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) { hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val; @@ -587,62 +145,6 @@ static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable) } } -/** - * ice_init_pkg_hints - * @hw: pointer to the HW structure - * @ice_seg: pointer to the segment of the package scan (non-NULL) - * - * This function will scan the package and save off relevant information - * (hints or metadata) for driver use. The ice_seg parameter must not be NULL - * since the first call to ice_enum_labels requires a pointer to an actual - * ice_seg structure. - */ -static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) -{ - struct ice_pkg_enum state; - char *label_name; - u16 val; - int i; - - memset(&hw->tnl, 0, sizeof(hw->tnl)); - memset(&state, 0, sizeof(state)); - - if (!ice_seg) - return; - - label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, - &val); - - while (label_name) { - if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) - /* check for a tunnel entry */ - ice_add_tunnel_hint(hw, label_name, val); - - /* check for a dvm mode entry */ - else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE))) - ice_add_dvm_hint(hw, val, true); - - /* check for a svm mode entry */ - else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE))) - ice_add_dvm_hint(hw, val, false); - - label_name = ice_enum_labels(NULL, 0, &state, &val); - } - - /* Cache the appropriate boost TCAM entry pointers for tunnels */ - for (i = 0; i < hw->tnl.count; i++) { - ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, - &hw->tnl.tbl[i].boost_entry); - if (hw->tnl.tbl[i].boost_entry) - hw->tnl.tbl[i].valid = true; - } - - /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */ - for (i = 0; i < hw->dvm_upd.count; i++) - ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr, - &hw->dvm_upd.tbl[i].boost_entry); -} - /* Key creation */ #define ICE_DC_KEY 0x1 /* don't care */ @@ -679,7 +181,7 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) * ------------------------------ * Result: key: b01 10 11 11 00 00 */ -static enum ice_status +static int ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, u8 *key_inv) { @@ -688,7 +190,7 @@ ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, /* 'dont_care' and 'nvr_mtch' masks 
cannot overlap */ if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch)) - return ICE_ERR_CFG; + return -EIO; *key = 0; *key_inv = 0; @@ -781,7 +283,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) * dc == NULL --> dc mask is all 0's (no don't care bits) * nm == NULL --> nm mask is all 0's (no never match bits) */ -enum ice_status +int ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, u16 len) { @@ -790,11 +292,11 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, /* size must be a multiple of 2 bytes. */ if (size % 2) - return ICE_ERR_CFG; + return -EIO; half_size = size / 2; if (off + len > half_size) - return ICE_ERR_CFG; + return -EIO; /* Make sure at most one bit is set in the never match mask. Having more * than one never match mask bit set will cause HW to consume excessive @@ -802,1423 +304,17 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, */ #define ICE_NVR_MTCH_BITS_MAX 1 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX)) - return ICE_ERR_CFG; + return -EIO; for (i = 0; i < len; i++) if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff, dc ? dc[i] : 0, nm ? nm[i] : 0, key + off + i, key + half_size + off + i)) - return ICE_ERR_CFG; + return -EIO; return 0; } -/** - * ice_acquire_global_cfg_lock - * @hw: pointer to the HW structure - * @access: access type (read or write) - * - * This function will request ownership of the global config lock for reading - * or writing of the package. When attempting to obtain write access, the - * caller must check for the following two return values: - * - * ICE_SUCCESS - Means the caller has acquired the global config lock - * and can perform writing of the package. - * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the - * package or has found that no update was necessary; in - * this case, the caller can just skip performing any - * update of the package. - */ -static enum ice_status -ice_acquire_global_cfg_lock(struct ice_hw *hw, - enum ice_aq_res_access_type access) -{ - enum ice_status status; - - status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, - ICE_GLOBAL_CFG_LOCK_TIMEOUT); - - if (!status) - mutex_lock(&ice_global_cfg_lock_sw); - else if (status == ICE_ERR_AQ_NO_WORK) - ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); - - return status; -} - -/** - * ice_release_global_cfg_lock - * @hw: pointer to the HW structure - * - * This function will release the global config lock. - */ -static void ice_release_global_cfg_lock(struct ice_hw *hw) -{ - mutex_unlock(&ice_global_cfg_lock_sw); - ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); -} - -/** - * ice_acquire_change_lock - * @hw: pointer to the HW structure - * @access: access type (read or write) - * - * This function will request ownership of the change lock. - */ -enum ice_status -ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) -{ - return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, - ICE_CHANGE_LOCK_TIMEOUT); -} - -/** - * ice_release_change_lock - * @hw: pointer to the HW structure - * - * This function will release the change lock using the proper Admin Command. 
- */ -void ice_release_change_lock(struct ice_hw *hw) -{ - ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID); -} - -/** - * ice_aq_download_pkg - * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer to transfer - * @buf_size: the size of the package buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL - * - * Download Package (0x0C40) - */ -static enum ice_status -ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, bool last_buf, u32 *error_offset, - u32 *error_info, struct ice_sq_cd *cd) -{ - struct ice_aqc_download_pkg *cmd; - struct ice_aq_desc desc; - enum ice_status status; - - if (error_offset) - *error_offset = 0; - if (error_info) - *error_info = 0; - - cmd = &desc.params.download_pkg; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - - - if (last_buf) - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; - - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == ICE_ERR_AQ_ERROR) { - /* Read error from buffer only when the FW returned an error */ - struct ice_aqc_download_pkg_resp *resp; - - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; - if (error_offset) - *error_offset = le32_to_cpu(resp->error_offset); - if (error_info) - *error_info = le32_to_cpu(resp->error_info); - } - - return status; -} - -/** - * ice_aq_upload_section - * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer which will receive the section - * @buf_size: the size of the package buffer - * @cd: pointer to command details structure or NULL - * - * Upload Section (0x0C41) - */ -enum ice_status -ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, struct ice_sq_cd *cd) -{ - struct ice_aq_desc desc; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - - - return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); -} - -/** - * ice_aq_update_pkg - * @hw: pointer to the hardware structure - * @pkg_buf: the package cmd buffer - * @buf_size: the size of the package cmd buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL - * - * Update Package (0x0C42) - */ -static enum ice_status -ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, - bool last_buf, u32 *error_offset, u32 *error_info, - struct ice_sq_cd *cd) -{ - struct ice_aqc_download_pkg *cmd; - struct ice_aq_desc desc; - enum ice_status status; - - if (error_offset) - *error_offset = 0; - if (error_info) - *error_info = 0; - - cmd = &desc.params.download_pkg; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - - - if (last_buf) - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; - - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == ICE_ERR_AQ_ERROR) { - /* Read error from buffer only when the FW returned an error */ - struct ice_aqc_download_pkg_resp *resp; - - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; - if (error_offset) - *error_offset = le32_to_cpu(resp->error_offset); - if (error_info) - *error_info = le32_to_cpu(resp->error_info); - } - - return status; -} - -/** - * ice_find_seg_in_pkg - * @hw: pointer to the hardware structure - * @seg_type: the 
segment type to search for (i.e., SEGMENT_TYPE_CPK) - * @pkg_hdr: pointer to the package header to be searched - * - * This function searches a package file for a particular segment type. On - * success it returns a pointer to the segment header, otherwise it will - * return NULL. - */ -static struct ice_generic_seg_hdr * -ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, - struct ice_pkg_hdr *pkg_hdr) -{ - u32 i; - - ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", - pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, - pkg_hdr->pkg_format_ver.update, - pkg_hdr->pkg_format_ver.draft); - - /* Search all package segments for the requested segment type */ - for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { - struct ice_generic_seg_hdr *seg; - - seg = (struct ice_generic_seg_hdr *) - ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i])); - - if (le32_to_cpu(seg->seg_type) == seg_type) - return seg; - } - - return NULL; -} - -/** - * ice_update_pkg_no_lock - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - */ -static enum ice_status -ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - enum ice_status status = 0; - u32 i; - - for (i = 0; i < count; i++) { - struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); - bool last = ((i + 1) == count); - u32 offset, info; - - status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end), - last, &offset, &info, NULL); - - if (status) { - ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", - status, offset, info); - break; - } - } - - return status; -} - -/** - * ice_update_pkg - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - * - * Obtains change lock and updates package. - */ -static enum ice_status -ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - enum ice_status status; - - status = ice_acquire_change_lock(hw, ICE_RES_WRITE); - if (status) - return status; - - status = ice_update_pkg_no_lock(hw, bufs, count); - - ice_release_change_lock(hw); - - return status; -} - -/** - * ice_dwnld_cfg_bufs - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - * - * Obtains global config lock and downloads the package configuration buffers - * to the firmware. Metadata buffers are skipped, and the first metadata buffer - * found indicates that the rest of the buffers are all metadata buffers. - */ -static enum ice_status -ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - enum ice_status status; - struct ice_buf_hdr *bh; - u32 offset, info, i; - - if (!bufs || !count) - return ICE_ERR_PARAM; - - /* If the first buffer's first section has its metadata bit set - * then there are no buffers to be downloaded, and the operation is - * considered a success. 
- */ - bh = (struct ice_buf_hdr *)bufs; - if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) - return 0; - - /* reset pkg_dwnld_status in case this function is called in the - * reset/rebuild flow - */ - hw->pkg_dwnld_status = ICE_AQ_RC_OK; - - status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); - if (status) { - if (status == ICE_ERR_AQ_NO_WORK) - hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; - else - hw->pkg_dwnld_status = hw->adminq.sq_last_status; - return status; - } - - for (i = 0; i < count; i++) { - bool last = ((i + 1) == count); - - if (!last) { - /* check next buffer for metadata flag */ - bh = (struct ice_buf_hdr *)(bufs + i + 1); - - /* A set metadata flag in the next buffer will signal - * that the current buffer will be the last buffer - * downloaded - */ - if (le16_to_cpu(bh->section_count)) - if (le32_to_cpu(bh->section_entry[0].type) & - ICE_METADATA_BUF) - last = true; - } - - bh = (struct ice_buf_hdr *)(bufs + i); - - status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, - &offset, &info, NULL); - - /* Save AQ status from download package */ - hw->pkg_dwnld_status = hw->adminq.sq_last_status; - if (status) { - ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", - status, offset, info); - - break; - } - - if (last) - break; - } - - if (!status) { - status = ice_set_vlan_mode(hw); - if (status) - ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", - status); - } - - ice_release_global_cfg_lock(hw); - - return status; -} - -/** - * ice_aq_get_pkg_info_list - * @hw: pointer to the hardware structure - * @pkg_info: the buffer which will receive the information list - * @buf_size: the size of the pkg_info information buffer - * @cd: pointer to command details structure or NULL - * - * Get Package Info List (0x0C43) - */ -static enum ice_status -ice_aq_get_pkg_info_list(struct ice_hw *hw, - struct ice_aqc_get_pkg_info_resp *pkg_info, - u16 buf_size, struct ice_sq_cd *cd) -{ - struct ice_aq_desc desc; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); - - - return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); -} - -/** - * ice_download_pkg - * @hw: pointer to the hardware structure - * @ice_seg: pointer to the segment of the package to be downloaded - * - * Handles the download of a complete package. - */ -static enum ice_status -ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) -{ - struct ice_buf_table *ice_buf_tbl; - enum ice_status status; - - ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", - ice_seg->hdr.seg_format_ver.major, - ice_seg->hdr.seg_format_ver.minor, - ice_seg->hdr.seg_format_ver.update, - ice_seg->hdr.seg_format_ver.draft); - - ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", - le32_to_cpu(ice_seg->hdr.seg_type), - le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); - - ice_buf_tbl = ice_find_buf_table(ice_seg); - - ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", - le32_to_cpu(ice_buf_tbl->buf_count)); - - status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, - le32_to_cpu(ice_buf_tbl->buf_count)); - - ice_post_pkg_dwnld_vlan_mode_cfg(hw); - - return status; -} - -/** - * ice_init_pkg_info - * @hw: pointer to the hardware structure - * @pkg_hdr: pointer to the driver's package hdr - * - * Saves off the package details into the HW structure. 
- */ -static enum ice_status -ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) -{ - struct ice_generic_seg_hdr *seg_hdr; - - if (!pkg_hdr) - return ICE_ERR_PARAM; - - seg_hdr = (struct ice_generic_seg_hdr *) - ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); - if (seg_hdr) { - struct ice_meta_sect *meta; - struct ice_pkg_enum state; - - memset(&state, 0, sizeof(state)); - - /* Get package information from the Metadata Section */ - meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, - ICE_SID_METADATA); - if (!meta) { - ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); - return ICE_ERR_CFG; - } - - hw->pkg_ver = meta->ver; - memcpy(hw->pkg_name, meta->name, sizeof(meta->name)); - - ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", - meta->ver.major, meta->ver.minor, meta->ver.update, - meta->ver.draft, meta->name); - - hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; - memcpy(hw->ice_seg_id, seg_hdr->seg_id, - sizeof(hw->ice_seg_id)); - - ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", - seg_hdr->seg_format_ver.major, - seg_hdr->seg_format_ver.minor, - seg_hdr->seg_format_ver.update, - seg_hdr->seg_format_ver.draft, - seg_hdr->seg_id); - } else { - ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); - return ICE_ERR_CFG; - } - - return 0; -} - -/** - * ice_get_pkg_info - * @hw: pointer to the hardware structure - * - * Store details of the package currently loaded in HW into the HW structure. - */ -static enum ice_status ice_get_pkg_info(struct ice_hw *hw) -{ - struct ice_aqc_get_pkg_info_resp *pkg_info; - enum ice_status status; - u16 size; - u32 i; - - size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); - pkg_info = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL); - if (!pkg_info) - return ICE_ERR_NO_MEMORY; - - status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); - if (status) - goto init_pkg_free_alloc; - - for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { -#define ICE_PKG_FLAG_COUNT 4 - char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; - u8 place = 0; - - if (pkg_info->pkg_info[i].is_active) { - flags[place++] = 'A'; - hw->active_pkg_ver = pkg_info->pkg_info[i].ver; - hw->active_track_id = - le32_to_cpu(pkg_info->pkg_info[i].track_id); - memcpy(hw->active_pkg_name, - pkg_info->pkg_info[i].name, - sizeof(pkg_info->pkg_info[i].name)); - hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; - } - if (pkg_info->pkg_info[i].is_active_at_boot) - flags[place++] = 'B'; - if (pkg_info->pkg_info[i].is_modified) - flags[place++] = 'M'; - if (pkg_info->pkg_info[i].is_in_nvm) - flags[place++] = 'N'; - - ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", - i, pkg_info->pkg_info[i].ver.major, - pkg_info->pkg_info[i].ver.minor, - pkg_info->pkg_info[i].ver.update, - pkg_info->pkg_info[i].ver.draft, - pkg_info->pkg_info[i].name, flags); - } - -init_pkg_free_alloc: - devm_kfree(ice_hw_to_dev(hw), pkg_info); - - return status; -} - -/** - * ice_find_label_value - * @ice_seg: pointer to the ice segment (non-NULL) - * @name: name of the label to search for - * @type: the section type that will contain the label - * @value: pointer to a value that will return the label's value if found - * - * Finds a label's value given the label name and the section type to search. - * The ice_seg parameter must not be NULL since the first call to - * ice_enum_labels requires a pointer to an actual ice_seg structure. 
- */ -enum ice_status -ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, - u16 *value) -{ - struct ice_pkg_enum state; - char *label_name; - u16 val; - - memset(&state, 0, sizeof(state)); - - if (!ice_seg) - return ICE_ERR_PARAM; - - do { - label_name = ice_enum_labels(ice_seg, type, &state, &val); - if (label_name && !strcmp(label_name, name)) { - *value = val; - return 0; - } - - ice_seg = NULL; - } while (label_name); - - return ICE_ERR_CFG; -} - -/** - * ice_verify_pkg - verify package - * @pkg: pointer to the package buffer - * @len: size of the package buffer - * - * Verifies various attributes of the package file, including length, format - * version, and the requirement of at least one segment. - */ -static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) -{ - u32 seg_count; - u32 i; - - if (len < struct_size(pkg, seg_offset, 1)) - return ICE_ERR_BUF_TOO_SHORT; - - if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || - pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || - pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || - pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) - return ICE_ERR_CFG; - - /* pkg must have at least one segment */ - seg_count = le32_to_cpu(pkg->seg_count); - if (seg_count < 1) - return ICE_ERR_CFG; - - /* make sure segment array fits in package length */ - if (len < struct_size(pkg, seg_offset, seg_count)) - return ICE_ERR_BUF_TOO_SHORT; - - /* all segments must fit within length */ - for (i = 0; i < seg_count; i++) { - u32 off = le32_to_cpu(pkg->seg_offset[i]); - struct ice_generic_seg_hdr *seg; - - /* segment header must fit */ - if (len < off + sizeof(*seg)) - return ICE_ERR_BUF_TOO_SHORT; - - seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); - - /* segment body must fit */ - if (len < off + le32_to_cpu(seg->seg_size)) - return ICE_ERR_BUF_TOO_SHORT; - } - - return 0; -} - -/** - * ice_free_seg - free package segment pointer - * @hw: pointer to the hardware structure - * - * Frees the package segment pointer in the proper manner, depending on if the - * segment was allocated or just the passed in pointer was stored. - */ -void ice_free_seg(struct ice_hw *hw) -{ - if (hw->pkg_copy) { - devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); - hw->pkg_copy = NULL; - hw->pkg_size = 0; - } - hw->seg = NULL; -} - -/** - * ice_init_pkg_regs - initialize additional package registers - * @hw: pointer to the hardware structure - */ -static void ice_init_pkg_regs(struct ice_hw *hw) -{ -#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF -#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF -#define ICE_SW_BLK_IDX 0 - - /* setup Switch block input mask, which is 48-bits in two parts */ - wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); - wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); -} - -/** - * ice_chk_pkg_version - check package version for compatibility with driver - * @pkg_ver: pointer to a version structure to check - * - * Check to make sure that the package about to be downloaded is compatible with - * the driver. To be compatible, the major and minor components of the package - * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR - * definitions. 
- */ -static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) -{ - /* The major is 0xFF indicates that it is a custom DDP */ - if (pkg_ver->major == 0xFF) - return 0; - - if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || - pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) - return ICE_ERR_NOT_SUPPORTED; - - return 0; -} - -/** - * ice_chk_pkg_compat - * @hw: pointer to the hardware structure - * @ospkg: pointer to the package hdr - * @seg: pointer to the package segment hdr - * - * This function checks the package version compatibility with driver and NVM - */ -static enum ice_status -ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, - struct ice_seg **seg) -{ - struct ice_aqc_get_pkg_info_resp *pkg; - enum ice_status status; - u16 size; - u32 i; - - /* Check package version compatibility */ - status = ice_chk_pkg_version(&hw->pkg_ver); - if (status) { - ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); - return status; - } - - /* find ICE segment in given package */ - *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, - ospkg); - if (!*seg) { - ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); - return ICE_ERR_CFG; - } - - /* Check if FW is compatible with the OS package */ - size = struct_size(pkg, pkg_info, ICE_PKG_CNT); - pkg = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL); - if (!pkg) - return ICE_ERR_NO_MEMORY; - - status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL); - if (status) - goto fw_ddp_compat_free_alloc; - - for (i = 0; i < le32_to_cpu(pkg->count); i++) { - /* loop till we find the NVM package */ - if (!pkg->pkg_info[i].is_in_nvm) - continue; - if ((*seg)->hdr.seg_format_ver.major != - pkg->pkg_info[i].ver.major || - (*seg)->hdr.seg_format_ver.minor > - pkg->pkg_info[i].ver.minor) { - status = ICE_ERR_FW_DDP_MISMATCH; - ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); - } - /* done processing NVM package so break */ - break; - } -fw_ddp_compat_free_alloc: - devm_kfree(ice_hw_to_dev(hw), pkg); - return status; -} - -/** - * ice_sw_fv_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the field vector entry to be returned - * @offset: ptr to variable that receives the offset in the field vector table - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * This function treats the given section as of type ice_sw_fv_section and - * enumerates offset field. "offset" is an index into the field vector table. - */ -static void * -ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ - struct ice_sw_fv_section *fv_section = section; - - if (!section || sect_type != ICE_SID_FLD_VEC_SW) - return NULL; - if (index >= le16_to_cpu(fv_section->count)) - return NULL; - if (offset) - /* "index" passed in to this function is relative to a given - * 4k block. To get to the true index into the field vector - * table need to add the relative index to the base_offset - * field of this section - */ - *offset = le16_to_cpu(fv_section->base_offset) + index; - return fv_section->fv + index; -} - -/** - * ice_get_prof_index_max - get the max profile index for used profile - * @hw: pointer to the HW struct - * - * Calling this function will get the max profile index for used profile - * and store the index number in struct ice_switch_info *switch_info - * in hw for following use. 
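The version rule in the removed ice_chk_pkg_version() above boils down to: a package whose major version is 0xFF is a custom DDP and is always accepted, otherwise major and minor must match the driver's supported values. A small sketch under assumed supported numbers (the real ICE_PKG_SUPP_VER_MAJ/ICE_PKG_SUPP_VER_MNR come from the ice headers); the -EOPNOTSUPP return mirrors the usual mapping of ICE_ERR_NOT_SUPPORTED.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define SUPP_VER_MAJ 1	/* assumption for illustration */
#define SUPP_VER_MNR 3	/* assumption for illustration */

struct pkg_ver { uint8_t major, minor, update, draft; };

static int chk_pkg_version(const struct pkg_ver *v)
{
	if (v->major == 0xFF)		/* custom DDP: always accepted */
		return 0;
	if (v->major != SUPP_VER_MAJ || v->minor != SUPP_VER_MNR)
		return -EOPNOTSUPP;	/* driver/package mismatch */
	return 0;
}

int main(void)
{
	struct pkg_ver ok = { 1, 3, 10, 0 };
	struct pkg_ver custom = { 0xFF, 0, 0, 0 };
	struct pkg_ver bad = { 2, 0, 0, 0 };

	printf("%d %d %d\n", chk_pkg_version(&ok), chk_pkg_version(&custom),
	       chk_pkg_version(&bad));
	return 0;
}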
- */ -static int ice_get_prof_index_max(struct ice_hw *hw) -{ - u16 prof_index = 0, j, max_prof_index = 0; - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - bool flag = false; - struct ice_fv *fv; - u32 offset; - - memset(&state, 0, sizeof(state)); - - if (!hw->seg) - return ICE_ERR_PARAM; - - ice_seg = hw->seg; - - do { - fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - if (!fv) - break; - ice_seg = NULL; - - /* in the profile that not be used, the prot_id is set to 0xff - * and the off is set to 0x1ff for all the field vectors. - */ - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv->ew[j].prot_id != ICE_PROT_INVALID || - fv->ew[j].off != ICE_FV_OFFSET_INVAL) - flag = true; - if (flag && prof_index > max_prof_index) - max_prof_index = prof_index; - - prof_index++; - flag = false; - } while (fv); - - hw->switch_info->max_used_prof_index = max_prof_index; - - return 0; -} - -/** - * ice_init_pkg - initialize/download package - * @hw: pointer to the hardware structure - * @buf: pointer to the package buffer - * @len: size of the package buffer - * - * This function initializes a package. The package contains HW tables - * required to do packet processing. First, the function extracts package - * information such as version. Then it finds the ice configuration segment - * within the package; this function then saves a copy of the segment pointer - * within the supplied package buffer. Next, the function will cache any hints - * from the package, followed by downloading the package itself. Note, that if - * a previous PF driver has already downloaded the package successfully, then - * the current driver will not have to download the package again. - * - * The local package contents will be used to query default behavior and to - * update specific sections of the HW's version of the package (e.g. to update - * the parse graph to understand new protocols). - * - * This function stores a pointer to the package buffer memory, and it is - * expected that the supplied buffer will not be freed immediately. If the - * package buffer needs to be freed, such as when read from a file, use - * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this - * case. - */ -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) -{ - struct ice_pkg_hdr *pkg; - enum ice_status status; - struct ice_seg *seg; - - if (!buf || !len) - return ICE_ERR_PARAM; - - pkg = (struct ice_pkg_hdr *)buf; - status = ice_verify_pkg(pkg, len); - if (status) { - ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", - status); - return status; - } - - /* initialize package info */ - status = ice_init_pkg_info(hw, pkg); - if (status) - return status; - - /* before downloading the package, check package version for - * compatibility with driver - */ - status = ice_chk_pkg_compat(hw, pkg, &seg); - if (status) - return status; - - /* initialize package hints and then download package */ - ice_init_pkg_hints(hw, seg); - status = ice_download_pkg(hw, seg); - if (status == ICE_ERR_AQ_NO_WORK) { - ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); - status = 0; - } - - /* Get information on the package currently loaded in HW, then make sure - * the driver is compatible with this version. 
- */ - if (!status) { - status = ice_get_pkg_info(hw); - if (!status) - status = ice_chk_pkg_version(&hw->active_pkg_ver); - } - - if (!status) { - hw->seg = seg; - /* on successful package download update other required - * registers to support the package and fill HW tables - * with package content. - */ - ice_init_pkg_regs(hw); - ice_fill_blk_tbls(hw); - ice_fill_hw_ptype(hw); - ice_get_prof_index_max(hw); - } else { - ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", - status); - } - - return status; -} - -/** - * ice_copy_and_init_pkg - initialize/download a copy of the package - * @hw: pointer to the hardware structure - * @buf: pointer to the package buffer - * @len: size of the package buffer - * - * This function copies the package buffer, and then calls ice_init_pkg() to - * initialize the copied package contents. - * - * The copying is necessary if the package buffer supplied is constant, or if - * the memory may disappear shortly after calling this function. - * - * If the package buffer resides in the data segment and can be modified, the - * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). - * - * However, if the package buffer needs to be copied first, such as when being - * read from a file, the caller should use ice_copy_and_init_pkg(). - * - * This function will first copy the package buffer, before calling - * ice_init_pkg(). The caller is free to immediately destroy the original - * package buffer, as the new copy will be managed by this function and - * related routines. - */ -enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) -{ - enum ice_status status; - u8 *buf_copy; - - if (!buf || !len) - return ICE_ERR_PARAM; - - buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); - - status = ice_init_pkg(hw, buf_copy, len); - if (status) { - /* Free the copy, since we failed to initialize the package */ - devm_kfree(ice_hw_to_dev(hw), buf_copy); - } else { - /* Track the copied pkg so we can free it later */ - hw->pkg_copy = buf_copy; - hw->pkg_size = len; - } - - return status; -} - -/** - * ice_pkg_buf_alloc - * @hw: pointer to the HW structure - * - * Allocates a package buffer and returns a pointer to the buffer header. - * Note: all package contents must be in Little Endian form. 
- */ -static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) -{ - struct ice_buf_build *bld; - struct ice_buf_hdr *buf; - - bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL); - if (!bld) - return NULL; - - buf = (struct ice_buf_hdr *)bld; - buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr, - section_entry)); - return bld; -} - -/** - * ice_get_sw_prof_type - determine switch profile type - * @hw: pointer to the HW structure - * @fv: pointer to the switch field vector - */ -static enum ice_prof_type -ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv) -{ - u16 i; - - for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { - /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ - if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && - fv->ew[i].off == ICE_VNI_OFFSET) - return ICE_PROF_TUN_UDP; - - /* GRE tunnel will have GRE protocol */ - if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) - return ICE_PROF_TUN_GRE; - } - - return ICE_PROF_NON_TUN; -} - -/** - * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type - * @hw: pointer to hardware structure - * @req_profs: type of profiles requested - * @bm: pointer to memory for returning the bitmap of field vectors - */ -void -ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, - unsigned long *bm) -{ - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - - if (req_profs == ICE_PROF_ALL) { - bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); - return; - } - - memset(&state, 0, sizeof(state)); - bitmap_zero(bm, ICE_MAX_NUM_PROFILES); - ice_seg = hw->seg; - do { - enum ice_prof_type prof_type; - u32 offset; - - fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - ice_seg = NULL; - - if (fv) { - /* Determine field vector type */ - prof_type = ice_get_sw_prof_type(hw, fv); - - if (req_profs & prof_type) - set_bit((u16)offset, bm); - } - } while (fv); -} - -/** - * ice_get_sw_fv_list - * @hw: pointer to the HW structure - * @prot_ids: field vector to search for with a given protocol ID - * @ids_cnt: lookup/protocol count - * @bm: bitmap of field vectors to consider - * @fv_list: Head of a list - * - * Finds all the field vector entries from switch block that contain - * a given protocol ID and returns a list of structures of type - * "ice_sw_fv_list_entry". Every structure in the list has a field vector - * definition and profile ID information - * NOTE: The caller of the function is responsible for freeing the memory - * allocated for every list entry. - */ -enum ice_status -ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, - unsigned long *bm, struct list_head *fv_list) -{ - struct ice_sw_fv_list_entry *fvl; - struct ice_sw_fv_list_entry *tmp; - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - u32 offset; - - memset(&state, 0, sizeof(state)); - - if (!ids_cnt || !hw->seg) - return ICE_ERR_PARAM; - - ice_seg = hw->seg; - do { - u16 i; - - fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - if (!fv) - break; - ice_seg = NULL; - - /* If field vector is not in the bitmap list, then skip this - * profile. - */ - if (!test_bit((u16)offset, bm)) - continue; - - for (i = 0; i < ids_cnt; i++) { - int j; - - /* This code assumes that if a switch field vector line - * has a matching protocol, then this line will contain - * the entries necessary to represent every field in - * that protocol header. 
- */ - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv->ew[j].prot_id == prot_ids[i]) - break; - if (j >= hw->blk[ICE_BLK_SW].es.fvw) - break; - if (i + 1 == ids_cnt) { - fvl = devm_kzalloc(ice_hw_to_dev(hw), - sizeof(*fvl), GFP_KERNEL); - if (!fvl) - goto err; - fvl->fv_ptr = fv; - fvl->profile_id = offset; - list_add(&fvl->list_entry, fv_list); - break; - } - } - } while (fv); - if (list_empty(fv_list)) - return ICE_ERR_CFG; - return 0; - -err: - list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) { - list_del(&fvl->list_entry); - devm_kfree(ice_hw_to_dev(hw), fvl); - } - - return ICE_ERR_NO_MEMORY; -} - -/** - * ice_init_prof_result_bm - Initialize the profile result index bitmap - * @hw: pointer to hardware structure - */ -void ice_init_prof_result_bm(struct ice_hw *hw) -{ - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - - memset(&state, 0, sizeof(state)); - - if (!hw->seg) - return; - - ice_seg = hw->seg; - do { - u32 off; - u16 i; - - fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &off, ice_sw_fv_handler); - ice_seg = NULL; - if (!fv) - break; - - bitmap_zero(hw->switch_info->prof_res_bm[off], - ICE_MAX_FV_WORDS); - - /* Determine empty field vector indices, these can be - * used for recipe results. Skip index 0, since it is - * always used for Switch ID. - */ - for (i = 1; i < ICE_MAX_FV_WORDS; i++) - if (fv->ew[i].prot_id == ICE_PROT_INVALID && - fv->ew[i].off == ICE_FV_OFFSET_INVAL) - set_bit(i, hw->switch_info->prof_res_bm[off]); - } while (fv); -} - -/** - * ice_pkg_buf_free - * @hw: pointer to the HW structure - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Frees a package buffer - */ -void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) -{ - devm_kfree(ice_hw_to_dev(hw), bld); -} - -/** - * ice_pkg_buf_reserve_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @count: the number of sections to reserve - * - * Reserves one or more section table entries in a package buffer. This routine - * can be called multiple times as long as they are made before calling - * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() - * is called once, the number of sections that can be allocated will not be able - * to be increased; not using all reserved sections is fine, but this will - * result in some wasted space in the buffer. - * Note: all package contents must be in Little Endian form. - */ -static enum ice_status -ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) -{ - struct ice_buf_hdr *buf; - u16 section_count; - u16 data_end; - - if (!bld) - return ICE_ERR_PARAM; - - buf = (struct ice_buf_hdr *)&bld->buf; - - /* already an active section, can't increase table size */ - section_count = le16_to_cpu(buf->section_count); - if (section_count > 0) - return ICE_ERR_CFG; - - if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) - return ICE_ERR_CFG; - bld->reserved_section_table_entries += count; - - data_end = le16_to_cpu(buf->data_end) + - flex_array_size(buf, section_entry, count); - buf->data_end = cpu_to_le16(data_end); - - return 0; -} - -/** - * ice_pkg_buf_alloc_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @type: the section type value - * @size: the size of the section to reserve (in bytes) - * - * Reserves memory in the buffer for a section's content and updates the - * buffers' status accordingly. 
This routine returns a pointer to the first - * byte of the section start within the buffer, which is used to fill in the - * section contents. - * Note: all package contents must be in Little Endian form. - */ -static void * -ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) -{ - struct ice_buf_hdr *buf; - u16 sect_count; - u16 data_end; - - if (!bld || !type || !size) - return NULL; - - buf = (struct ice_buf_hdr *)&bld->buf; - - /* check for enough space left in buffer */ - data_end = le16_to_cpu(buf->data_end); - - /* section start must align on 4 byte boundary */ - data_end = ALIGN(data_end, 4); - - if ((data_end + size) > ICE_MAX_S_DATA_END) - return NULL; - - /* check for more available section table entries */ - sect_count = le16_to_cpu(buf->section_count); - if (sect_count < bld->reserved_section_table_entries) { - void *section_ptr = ((u8 *)buf) + data_end; - - buf->section_entry[sect_count].offset = cpu_to_le16(data_end); - buf->section_entry[sect_count].size = cpu_to_le16(size); - buf->section_entry[sect_count].type = cpu_to_le32(type); - - data_end += size; - buf->data_end = cpu_to_le16(data_end); - - buf->section_count = cpu_to_le16(sect_count + 1); - return section_ptr; - } - - /* no free section table entries */ - return NULL; -} - -/** - * ice_pkg_buf_alloc_single_section - * @hw: pointer to the HW structure - * @type: the section type value - * @size: the size of the section to reserve (in bytes) - * @section: returns pointer to the section - * - * Allocates a package buffer with a single section. - * Note: all package contents must be in Little Endian form. - */ -struct ice_buf_build * -ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, - void **section) -{ - struct ice_buf_build *buf; - - if (!section) - return NULL; - - buf = ice_pkg_buf_alloc(hw); - if (!buf) - return NULL; - - if (ice_pkg_buf_reserve_section(buf, 1)) - goto ice_pkg_buf_alloc_single_section_err; - - *section = ice_pkg_buf_alloc_section(buf, type, size); - if (!*section) - goto ice_pkg_buf_alloc_single_section_err; - - return buf; - -ice_pkg_buf_alloc_single_section_err: - ice_pkg_buf_free(hw, buf); - return NULL; -} - -/** - * ice_pkg_buf_unreserve_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @count: the number of sections to unreserve - * - * Unreserves one or more section table entries in a package buffer, releasing - * space that can be used for section data. This routine can be called - * multiple times as long as they are made before calling - * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() - * is called once, the number of sections that can be allocated will not be able - * to be increased; not using all reserved sections is fine, but this will - * result in some wasted space in the buffer. - * Note: all package contents must be in Little Endian form. 
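The ice_pkg_buf_* helpers above share one bookkeeping pattern: reserve section table entries first, then carve 4-byte-aligned section space while tracking data_end against a fixed ceiling, and refuse to grow the table once a section has been allocated. A compact sketch of that bookkeeping with illustrative limits; MAX_SECTIONS, MAX_DATA_END and the 8-byte table-entry size are assumptions, not the driver's ICE_MAX_S_COUNT/ICE_MAX_S_DATA_END values.

#include <stdint.h>
#include <stdio.h>

#define MAX_SECTIONS 8		/* assumption */
#define MAX_DATA_END 4096	/* assumption */
#define ALIGN4(x)    (((x) + 3u) & ~3u)

struct bld {
	uint16_t reserved;	/* reserved section table entries */
	uint16_t section_count;	/* sections actually allocated    */
	uint16_t data_end;	/* next free byte in the buffer   */
};

static int reserve_sections(struct bld *b, uint16_t count)
{
	if (b->section_count)		/* too late once a section exists */
		return -1;
	if (b->reserved + count > MAX_SECTIONS)
		return -1;
	b->reserved += count;
	b->data_end += count * 8;	/* pretend each table entry is 8 bytes */
	return 0;
}

static int alloc_section(struct bld *b, uint16_t size)
{
	uint16_t start = ALIGN4(b->data_end);

	if (b->section_count >= b->reserved || start + size > MAX_DATA_END)
		return -1;
	b->section_count++;
	b->data_end = start + size;
	return start;			/* offset of the new section */
}

int main(void)
{
	struct bld b = { 0, 0, 8 /* pretend header size */ };

	reserve_sections(&b, 2);
	printf("first section at %d\n", alloc_section(&b, 100));
	printf("second section at %d\n", alloc_section(&b, 50));
	printf("third (unreserved) -> %d\n", alloc_section(&b, 10));
	return 0;
}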
- */ -enum ice_status -ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count) -{ - struct ice_buf_hdr *buf; - u16 section_count; - u16 data_end; - - if (!bld) - return ICE_ERR_PARAM; - - buf = (struct ice_buf_hdr *)&bld->buf; - - /* already an active section, can't decrease table size */ - section_count = le16_to_cpu(buf->section_count); - if (section_count > 0) - return ICE_ERR_CFG; - - if (count > bld->reserved_section_table_entries) - return ICE_ERR_CFG; - bld->reserved_section_table_entries -= count; - - data_end = le16_to_cpu(buf->data_end) - - flex_array_size(buf, section_entry, count); - buf->data_end = cpu_to_le16(data_end); - - return 0; -} - -/** - * ice_pkg_buf_get_free_space - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Returns the number of free bytes remaining in the buffer. - * Note: all package contents must be in Little Endian form. - */ -u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld) -{ - struct ice_buf_hdr *buf; - - if (!bld) - return 0; - - buf = (struct ice_buf_hdr *)&bld->buf; - return ICE_MAX_S_DATA_END - le16_to_cpu(buf->data_end); -} - -/** - * ice_pkg_buf_get_active_sections - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Returns the number of active sections. Before using the package buffer - * in an update package command, the caller should make sure that there is at - * least one active section - otherwise, the buffer is not legal and should - * not be used. - * Note: all package contents must be in Little Endian form. - */ -static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) -{ - struct ice_buf_hdr *buf; - - if (!bld) - return 0; - - buf = (struct ice_buf_hdr *)&bld->buf; - return le16_to_cpu(buf->section_count); -} - -/** - * ice_pkg_buf - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Return a pointer to the buffer's header - */ -struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) -{ - if (!bld) - return NULL; - - return &bld->buf; -} - /** * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage * @hw: pointer to the HW structure @@ -2349,22 +445,22 @@ ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, * @type: type of tunnel * @port: port of tunnel to create * - * Function returns ICE_SUCCESS if a tunnel can be created using specified - * tunnel type and port. If the tunnel is already present in hardware then - * ICE_ERR_ALREADY_EXISTS is returned, or if there's no space, then - * ICE_ERR_OUT_OF_RANGE. + * Function returns 0 if a tunnel can be created using specified tunnel type + * and port. If the tunnel is already present in hardware then + * -EEXIST is returned, or if there's no space, then + * -EIO. 
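The mechanical change running through these hunks is the replacement of enum ice_status return values with plain int and kernel errnos. A sketch of the correspondence exactly as it appears in this file; the enum names below are stand-ins for the removed ice_status values, since ice_status.h itself is deleted by this update.

#include <errno.h>
#include <stdio.h>

enum old_status {	/* stand-in for the removed enum ice_status */
	OLD_ERR_PARAM,
	OLD_ERR_INVAL_SIZE,
	OLD_ERR_NO_MEMORY,
	OLD_ERR_MAX_LIMIT,
	OLD_ERR_DOES_NOT_EXIST,
	OLD_ERR_ALREADY_EXISTS,
	OLD_ERR_CFG,
	OLD_ERR_OUT_OF_RANGE,
	OLD_ERR_AQ_ERROR,
};

static int old_status_to_errno(enum old_status s)
{
	switch (s) {
	case OLD_ERR_PARAM:
	case OLD_ERR_INVAL_SIZE:     return -EINVAL;
	case OLD_ERR_NO_MEMORY:      return -ENOMEM;
	case OLD_ERR_MAX_LIMIT:      return -ENOSPC;
	case OLD_ERR_DOES_NOT_EXIST: return -ENOENT;
	case OLD_ERR_ALREADY_EXISTS: return -EEXIST;
	case OLD_ERR_CFG:
	case OLD_ERR_OUT_OF_RANGE:
	case OLD_ERR_AQ_ERROR:       return -EIO;
	}
	return -EIO;
}

int main(void)
{
	printf("%d %d\n", old_status_to_errno(OLD_ERR_ALREADY_EXISTS),
	       old_status_to_errno(OLD_ERR_MAX_LIMIT));
	return 0;
}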
*/ -enum ice_status +int ice_is_create_tunnel_possible(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) { u16 index; if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; if (!ice_find_free_tunnel_entry(hw, type, &index)) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; return 0; } @@ -2388,17 +484,17 @@ bool ice_is_tunnel_empty(struct ice_hw *hw) * @hw: pointer to the HW structure * @entry: pointer to double vlan boost entry info */ -static enum ice_status +static int ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry) { struct ice_boost_tcam_section *sect_rx, *sect_tx; - enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; + int status = -ENOSPC; u8 val, dc, nm; bld = ice_pkg_buf_alloc(hw); if (!bld) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* allocate 2 sections, one for Rx parser, one for Tx parser */ if (ice_pkg_buf_reserve_section(bld, 2)) @@ -2452,12 +548,13 @@ ice_upd_dvm_boost_entry_err: * * Enable double vlan by updating the appropriate boost tcam entries. */ -enum ice_status ice_set_dvm_boost_entries(struct ice_hw *hw) +int ice_set_dvm_boost_entries(struct ice_hw *hw) { - enum ice_status status; u16 i; for (i = 0; i < hw->dvm_upd.count; i++) { + int status; + status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]); if (status) return status; @@ -2476,12 +573,12 @@ enum ice_status ice_set_dvm_boost_entries(struct ice_hw *hw) * creating a package buffer with the tunnel info and issuing an update package * command. */ -enum ice_status +int ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) { struct ice_boost_tcam_section *sect_rx, *sect_tx; - enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; + int status = -ENOSPC; u16 index; mutex_lock(&hw->tnl_lock); @@ -2493,13 +590,13 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) } if (!ice_find_free_tunnel_entry(hw, type, &index)) { - status = ICE_ERR_OUT_OF_RANGE; + status = -EIO; goto ice_create_tunnel_end; } bld = ice_pkg_buf_alloc(hw); if (!bld) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_create_tunnel_end; } @@ -2560,11 +657,11 @@ ice_create_tunnel_end: * targeting the specific updates requested and then performing an update * package. 
*/ -enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) +int ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) { struct ice_boost_tcam_section *sect_rx, *sect_tx; - enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; + int status = -ENOSPC; u16 count = 0; u16 index; u16 size; @@ -2586,7 +683,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) count++; if (!count) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_destroy_tunnel_end; } @@ -2595,7 +692,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) bld = ice_pkg_buf_alloc(hw); if (!bld) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_destroy_tunnel_end; } @@ -2655,9 +752,9 @@ ice_destroy_tunnel_end: * * Replays all tunnels */ -enum ice_status ice_replay_tunnels(struct ice_hw *hw) +int ice_replay_tunnels(struct ice_hw *hw) { - enum ice_status status = 0; + int status = 0; u16 i; for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) { @@ -2702,17 +799,17 @@ enum ice_status ice_replay_tunnels(struct ice_hw *hw) * @prot: variable to receive the protocol ID * @off: variable to receive the protocol offset */ -enum ice_status +int ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off) { struct ice_fv_word *fv_ext; if (prof >= hw->blk[blk].es.count) - return ICE_ERR_PARAM; + return -EINVAL; if (fv_idx >= hw->blk[blk].es.fvw) - return ICE_ERR_PARAM; + return -EINVAL; fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw); @@ -2732,18 +829,18 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, * This function will update the XLT1 hardware table to reflect the new * packet type group configuration. */ -enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk) +int ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk) { struct ice_xlt1_section *sect; struct ice_buf_build *bld; - enum ice_status status; + int status; u16 index; bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1), struct_size(sect, value, ICE_XLT1_CNT), (void **)§); if (!bld) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; sect->count = cpu_to_le16(ICE_XLT1_CNT); sect->offset = cpu_to_le16(0); @@ -2768,11 +865,11 @@ enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk) * PTG ID that contains it through the PTG parameter, with the value of * ICE_DEFAULT_PTG (0) meaning it is part the default PTG. */ -static enum ice_status +static int ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) { if (ptype >= ICE_XLT1_CNT || !ptg) - return ICE_ERR_PARAM; + return -EINVAL; *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; return 0; @@ -2827,21 +924,21 @@ void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg) * This function will remove the ptype from the specific PTG, and move it to * the default PTG (ICE_DEFAULT_PTG). 
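ice_ptg_remove_ptype(), whose conversion follows, walks the PTG's singly linked ptype list and, as its comment says, bypasses the link over the entry being removed. A standalone sketch of that bypass with simplified node types (not the driver's ice_ptg_ptype):

#include <stddef.h>
#include <stdio.h>

struct ptype_node {
	int id;
	struct ptype_node *next;
};

static void remove_ptype(struct ptype_node **head, struct ptype_node *victim)
{
	struct ptype_node **pp = head;

	while (*pp && *pp != victim)
		pp = &(*pp)->next;
	if (*pp)
		*pp = victim->next;	/* bypass the victim */
}

int main(void)
{
	struct ptype_node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct ptype_node *head = &a, *n;

	remove_ptype(&head, &b);
	for (n = head; n; n = n->next)
		printf("%d ", n->id);
	printf("\n");	/* prints: 1 3 */
	return 0;
}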
*/ -static enum ice_status +static int ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) { struct ice_ptg_ptype **ch; struct ice_ptg_ptype *p; if (ptype > ICE_XLT1_CNT - 1) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Should not happen if .in_use is set, bad config */ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) - return ICE_ERR_CFG; + return -EIO; /* find the ptype within this PTG, and bypass the link over it */ p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; @@ -2874,17 +971,17 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the * default PTG. */ -static enum ice_status +static int ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) { - enum ice_status status; u8 original_ptg; + int status; if (ptype > ICE_XLT1_CNT - 1) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); if (status) @@ -2986,7 +1083,9 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2) count++; list_for_each_entry(tmp2, list2, list) chk_count++; +#ifdef __CHECKER__ /* cppcheck-suppress knownConditionTrueFalse */ +#endif /* __CHECKER__ */ if (!count || count != chk_count) return false; @@ -3019,19 +1118,19 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2) * This function will update the XLT2 hardware table with the input VSI * group configuration. */ -static enum ice_status +static int ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_xlt2_section *sect; struct ice_buf_build *bld; - enum ice_status status; + int status; bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2), struct_size(sect, value, 1), (void **)§); if (!bld) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; sect->count = cpu_to_le16(1); sect->offset = cpu_to_le16(vsi); @@ -3052,14 +1151,14 @@ ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi, * This function will update the XLT2 hardware table with the input VSI * group configuration of used vsis. */ -enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk) +int ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk) { u16 vsi; for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) { /* update only vsis that have been changed */ if (hw->blk[blk].xlt2.vsis[vsi].changed) { - enum ice_status status; + int status; u16 vsig; vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; @@ -3084,11 +1183,11 @@ enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk) * This function will lookup the VSI entry in the XLT2 list and return * the VSI group its associated with. */ -static enum ice_status +static int ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) { if (!vsig || vsi >= ICE_MAX_VSI) - return ICE_ERR_PARAM; + return -EINVAL; /* As long as there's a default or valid VSIG associated with the input * VSI, the functions returns a success. Any handling of VSIG will be @@ -3153,7 +1252,7 @@ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk) * for, the list must match exactly, including the order in which the * characteristics are listed. 
*/ -static enum ice_status +static int ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, struct list_head *chs, u16 *vsig) { @@ -3167,7 +1266,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, return 0; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -3179,7 +1278,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, * The function will remove all VSIs associated with the input VSIG and move * them to the DEFAULT_VSIG and mark the VSIG available. */ -static enum ice_status +static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) { struct ice_vsig_prof *dtmp, *del; @@ -3188,10 +1287,10 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) idx = vsig & ICE_VSIG_IDX_M; if (idx >= ICE_MAX_VSIGS) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false; @@ -3240,7 +1339,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) * The function will remove the input VSI from its VSI group and move it * to the DEFAULT_VSIG. */ -static enum ice_status +static int ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; @@ -3249,10 +1348,10 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) idx = vsig & ICE_VSIG_IDX_M; if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* entry already in default VSIG, don't have to remove */ if (idx == ICE_DEFAULT_VSIG) @@ -3260,7 +1359,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; if (!(*vsi_head)) - return ICE_ERR_CFG; + return -EIO; vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; vsi_cur = (*vsi_head); @@ -3277,7 +1376,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) /* verify if VSI was removed from group list */ if (!vsi_cur) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; vsi_cur->vsig = ICE_DEFAULT_VSIG; vsi_cur->changed = 1; @@ -3298,24 +1397,24 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) * move the entry to the DEFAULT_VSIG, update the original VSIG and * then move entry to the new VSIG. */ -static enum ice_status +static int ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_vsig_vsi *tmp; - enum ice_status status; u16 orig_vsig, idx; + int status; idx = vsig & ICE_VSIG_IDX_M; if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) - return ICE_ERR_PARAM; + return -EINVAL; /* if VSIG not in use and VSIG is not default type this VSIG * doesn't exist. */ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && vsig != ICE_DEFAULT_VSIG) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); if (status) @@ -3421,7 +1520,7 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks) * @masks: masks for fv * @prof_id: receives the profile ID */ -static enum ice_status +static int ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, struct ice_fv_word *fv, u16 *masks, u8 *prof_id) { @@ -3432,7 +1531,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, * same field vector and mask. This will cause rule interference. 
*/ if (blk == ICE_BLK_FD || blk == ICE_BLK_RSS) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; for (i = 0; i < (u8)es->count; i++) { u16 off = i * es->fvw; @@ -3448,7 +1547,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, return 0; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -3519,14 +1618,14 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) * This function allocates a new entry in a Profile ID TCAM for a specific * block. */ -static enum ice_status +static int ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, u16 *tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); } @@ -3539,13 +1638,13 @@ ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, * * This function frees an entry in a Profile ID TCAM for a specific block. */ -static enum ice_status +static int ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; return ice_free_hw_res(hw, res_type, 1, &tcam_idx); } @@ -3559,15 +1658,15 @@ ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) * This function allocates a new profile ID, which also corresponds to a Field * Vector (Extraction Sequence) entry. */ -static enum ice_status +static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) { - enum ice_status status; u16 res_type; u16 get_prof; + int status; if (!ice_prof_id_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); if (!status) @@ -3584,14 +1683,14 @@ ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) * * This function frees a profile ID, which also corresponds to a Field Vector. 
*/ -static enum ice_status +static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { u16 tmp_prof_id = (u16)prof_id; u16 res_type; if (!ice_prof_id_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id); } @@ -3602,11 +1701,11 @@ ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) * @blk: the block from which to free the profile ID * @prof_id: the profile ID for which to increment the reference count */ -static enum ice_status +static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { if (prof_id > hw->blk[blk].es.count) - return ICE_ERR_PARAM; + return -EINVAL; hw->blk[blk].es.ref_count[prof_id]++; @@ -3699,7 +1798,7 @@ static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk) per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs; hw->blk[blk].masks.count = per_pf; - hw->blk[blk].masks.first = hw->pf_id * per_pf; + hw->blk[blk].masks.first = hw->logical_pf_id * per_pf; memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks)); @@ -3726,17 +1825,17 @@ void ice_init_all_prof_masks(struct ice_hw *hw) * @mask: the 16-bit mask * @mask_idx: variable to receive the mask index */ -static enum ice_status +static int ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask, u16 *mask_idx) { bool found_unused = false, found_copy = false; - enum ice_status status = ICE_ERR_MAX_LIMIT; u16 unused_idx = 0, copy_idx = 0; + int status = -ENOSPC; u16 i; if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->blk[blk].masks.lock); @@ -3794,15 +1893,15 @@ err_ice_alloc_prof_mask: * @blk: hardware block * @mask_idx: index of mask */ -static enum ice_status +static int ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx) { if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) - return ICE_ERR_PARAM; + return -EINVAL; if (!(mask_idx >= hw->blk[blk].masks.first && mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; mutex_lock(&hw->blk[blk].masks.lock); @@ -3836,14 +1935,14 @@ exit_ice_free_prof_mask: * @blk: hardware block * @prof_id: profile ID */ -static enum ice_status +static int ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id) { u32 mask_bm; u16 i; if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) - return ICE_ERR_PARAM; + return -EINVAL; mask_bm = hw->blk[blk].es.mask_ena[prof_id]; for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++) @@ -3898,7 +1997,7 @@ void ice_shutdown_all_prof_masks(struct ice_hw *hw) * @prof_id: profile ID * @masks: masks */ -static enum ice_status +static int ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, u16 *masks) { @@ -3928,7 +2027,7 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, if (ena_mask & BIT(i)) ice_free_prof_mask(hw, blk, i); - return ICE_ERR_OUT_OF_RANGE; + return -EIO; } /* enable the masks for this profile */ @@ -3970,11 +2069,11 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, * @blk: the block from which to free the profile ID * @prof_id: the profile ID for which to decrement the reference count */ -static enum ice_status +static int ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { if (prof_id > hw->blk[blk].es.count) - return ICE_ERR_PARAM; + return -EINVAL; if (hw->blk[blk].es.ref_count[prof_id] > 0) { if (!--hw->blk[blk].es.ref_count[prof_id]) { @@ 
-4218,6 +2317,147 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) } } +/** + * ice_init_flow_profs - init flow profile locks and list heads + * @hw: pointer to the hardware structure + * @blk_idx: HW block index + */ +static +void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) +{ + mutex_init(&hw->fl_profs_locks[blk_idx]); + INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); +} + +/** + * ice_init_hw_tbls - init hardware table memory + * @hw: pointer to the hardware structure + */ +int ice_init_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + mutex_init(&hw->rss_locks); + INIT_LIST_HEAD(&hw->rss_list_head); + ice_init_all_prof_masks(hw); + for (i = 0; i < ICE_BLK_COUNT; i++) { + struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_tcam *prof = &hw->blk[i].prof; + struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; + struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; + struct ice_es *es = &hw->blk[i].es; + u16 j; + + if (hw->blk[i].is_list_init) + continue; + + ice_init_flow_profs(hw, i); + mutex_init(&es->prof_map_lock); + INIT_LIST_HEAD(&es->prof_map); + hw->blk[i].is_list_init = true; + + hw->blk[i].overwrite = blk_sizes[i].overwrite; + es->reverse = blk_sizes[i].reverse; + + xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; + xlt1->count = blk_sizes[i].xlt1; + + xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, + sizeof(*xlt1->ptypes), GFP_KERNEL); + + if (!xlt1->ptypes) + goto err; + + xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS, + sizeof(*xlt1->ptg_tbl), + GFP_KERNEL); + + if (!xlt1->ptg_tbl) + goto err; + + xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, + sizeof(*xlt1->t), GFP_KERNEL); + if (!xlt1->t) + goto err; + + xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; + xlt2->count = blk_sizes[i].xlt2; + + xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, + sizeof(*xlt2->vsis), GFP_KERNEL); + + if (!xlt2->vsis) + goto err; + + xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, + sizeof(*xlt2->vsig_tbl), + GFP_KERNEL); + if (!xlt2->vsig_tbl) + goto err; + + for (j = 0; j < xlt2->count; j++) + INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); + + xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, + sizeof(*xlt2->t), GFP_KERNEL); + if (!xlt2->t) + goto err; + + prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; + prof->count = blk_sizes[i].prof_tcam; + prof->max_prof_id = blk_sizes[i].prof_id; + prof->cdid_bits = blk_sizes[i].prof_cdid_bits; + prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count, + sizeof(*prof->t), GFP_KERNEL); + + if (!prof->t) + goto err; + + prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; + prof_redir->count = blk_sizes[i].prof_redir; + prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw), + prof_redir->count, + sizeof(*prof_redir->t), + GFP_KERNEL); + + if (!prof_redir->t) + goto err; + + es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; + es->count = blk_sizes[i].es; + es->fvw = blk_sizes[i].fvw; + es->t = devm_kcalloc(ice_hw_to_dev(hw), + (u32)(es->count * es->fvw), + sizeof(*es->t), GFP_KERNEL); + if (!es->t) + goto err; + + es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->ref_count), + GFP_KERNEL); + + if (!es->ref_count) + goto err; + + es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->written), GFP_KERNEL); + + if (!es->written) + goto err; + + es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->mask_ena), GFP_KERNEL); + + if (!es->mask_ena) + goto err; + } + return 0; + +err: + ice_free_hw_tbls(hw); + return -ENOMEM; +} + /** * 
ice_fill_blk_tbls - Read package context for tables * @hw: pointer to the hardware structure @@ -4356,17 +2596,6 @@ void ice_free_hw_tbls(struct ice_hw *hw) memset(hw->blk, 0, sizeof(hw->blk)); } -/** - * ice_init_flow_profs - init flow profile locks and list heads - * @hw: pointer to the hardware structure - * @blk_idx: HW block index - */ -static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) -{ - mutex_init(&hw->fl_profs_locks[blk_idx]); - INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); -} - /** * ice_clear_hw_tbls - clear HW tables and flow profiles * @hw: pointer to the hardware structure @@ -4389,156 +2618,52 @@ void ice_clear_hw_tbls(struct ice_hw *hw) ice_free_vsig_tbl(hw, (enum ice_block)i); - memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes)); - memset(xlt1->ptg_tbl, 0, - ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl)); - memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t)); + if (xlt1->ptypes) + memset(xlt1->ptypes, 0, + xlt1->count * sizeof(*xlt1->ptypes)); - memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis)); - memset(xlt2->vsig_tbl, 0, - xlt2->count * sizeof(*xlt2->vsig_tbl)); - memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t)); + if (xlt1->ptg_tbl) + memset(xlt1->ptg_tbl, 0, + ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl)); - memset(prof->t, 0, prof->count * sizeof(*prof->t)); - memset(prof_redir->t, 0, - prof_redir->count * sizeof(*prof_redir->t)); + if (xlt1->t) + memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t)); - memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw); - memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); - memset(es->written, 0, es->count * sizeof(*es->written)); - memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena)); + if (xlt2->vsis) + memset(xlt2->vsis, 0, + xlt2->count * sizeof(*xlt2->vsis)); + + if (xlt2->vsig_tbl) + memset(xlt2->vsig_tbl, 0, + xlt2->count * sizeof(*xlt2->vsig_tbl)); + + if (xlt2->t) + memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t)); + + if (prof->t) + memset(prof->t, 0, prof->count * sizeof(*prof->t)); + + if (prof_redir->t) + memset(prof_redir->t, 0, + prof_redir->count * sizeof(*prof_redir->t)); + + if (es->t) + memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw); + + if (es->ref_count) + memset(es->ref_count, 0, + es->count * sizeof(*es->ref_count)); + + if (es->written) + memset(es->written, 0, + es->count * sizeof(*es->written)); + + if (es->mask_ena) + memset(es->mask_ena, 0, + es->count * sizeof(*es->mask_ena)); } } -/** - * ice_init_hw_tbls - init hardware table memory - * @hw: pointer to the hardware structure - */ -enum ice_status ice_init_hw_tbls(struct ice_hw *hw) -{ - u8 i; - - mutex_init(&hw->rss_locks); - INIT_LIST_HEAD(&hw->rss_list_head); - ice_init_all_prof_masks(hw); - for (i = 0; i < ICE_BLK_COUNT; i++) { - struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; - struct ice_prof_tcam *prof = &hw->blk[i].prof; - struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; - struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; - struct ice_es *es = &hw->blk[i].es; - u16 j; - - if (hw->blk[i].is_list_init) - continue; - - ice_init_flow_profs(hw, i); - mutex_init(&es->prof_map_lock); - INIT_LIST_HEAD(&es->prof_map); - hw->blk[i].is_list_init = true; - - hw->blk[i].overwrite = blk_sizes[i].overwrite; - es->reverse = blk_sizes[i].reverse; - - xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; - xlt1->count = blk_sizes[i].xlt1; - - xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, - sizeof(*xlt1->ptypes), GFP_KERNEL); - - if (!xlt1->ptypes) - goto err; - - xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), 
ICE_MAX_PTGS, - sizeof(*xlt1->ptg_tbl), - GFP_KERNEL); - - if (!xlt1->ptg_tbl) - goto err; - - xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, - sizeof(*xlt1->t), GFP_KERNEL); - if (!xlt1->t) - goto err; - - xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; - xlt2->count = blk_sizes[i].xlt2; - - xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, - sizeof(*xlt2->vsis), GFP_KERNEL); - - if (!xlt2->vsis) - goto err; - - xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, - sizeof(*xlt2->vsig_tbl), - GFP_KERNEL); - if (!xlt2->vsig_tbl) - goto err; - - for (j = 0; j < xlt2->count; j++) - INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); - - xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, - sizeof(*xlt2->t), GFP_KERNEL); - if (!xlt2->t) - goto err; - - prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; - prof->count = blk_sizes[i].prof_tcam; - prof->max_prof_id = blk_sizes[i].prof_id; - prof->cdid_bits = blk_sizes[i].prof_cdid_bits; - prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count, - sizeof(*prof->t), GFP_KERNEL); - - if (!prof->t) - goto err; - - prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; - prof_redir->count = blk_sizes[i].prof_redir; - prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw), - prof_redir->count, - sizeof(*prof_redir->t), - GFP_KERNEL); - - if (!prof_redir->t) - goto err; - - es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; - es->count = blk_sizes[i].es; - es->fvw = blk_sizes[i].fvw; - es->t = devm_kcalloc(ice_hw_to_dev(hw), - (u32)(es->count * es->fvw), - sizeof(*es->t), GFP_KERNEL); - if (!es->t) - goto err; - - es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, - sizeof(*es->ref_count), - GFP_KERNEL); - - if (!es->ref_count) - goto err; - - es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, - sizeof(*es->written), GFP_KERNEL); - - if (!es->written) - goto err; - - es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count, - sizeof(*es->mask_ena), GFP_KERNEL); - - if (!es->mask_ena) - goto err; - } - return 0; - -err: - ice_free_hw_tbls(hw); - return ICE_ERR_NO_MEMORY; -} - /** * ice_prof_gen_key - generate profile ID key * @hw: pointer to the HW struct @@ -4552,7 +2677,7 @@ err: * @nm_msk: never match mask * @key: output of profile ID key */ -static enum ice_status +static int ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ], @@ -4608,7 +2733,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, * @dc_msk: don't care mask * @nm_msk: never match mask */ -static enum ice_status +static int ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], @@ -4616,7 +2741,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, u8 nm_msk[ICE_TCAM_KEY_VAL_SZ]) { struct ice_prof_tcam_entry; - enum ice_status status; + int status; status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk, dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key); @@ -4635,7 +2760,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, * @vsig: VSIG to query * @refs: pointer to variable to receive the reference count */ -static enum ice_status +static int ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) { u16 idx = vsig & ICE_VSIG_IDX_M; @@ -4644,7 +2769,7 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) *refs = 0; if 
(!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; while (ptr) { @@ -4685,7 +2810,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4703,7 +2828,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, struct_size(p, es, 1) + vec_size - sizeof(p->es[0])); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->offset = cpu_to_le16(tmp->prof_id); @@ -4721,7 +2846,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4737,7 +2862,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, struct_size(p, entry, 1)); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->entry[0].addr = cpu_to_le16(tmp->tcam_idx); @@ -4757,7 +2882,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4773,7 +2898,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, struct_size(p, value, 1)); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->offset = cpu_to_le16(tmp->ptype); @@ -4789,7 +2914,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4808,7 +2933,7 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, struct_size(p, value, 1)); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->offset = cpu_to_le16(tmp->vsi); @@ -4828,18 +2953,18 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, * @blk: hardware block * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, struct list_head *chgs) { struct ice_buf_build *b; struct ice_chs_chg *tmp; - enum ice_status status; u16 pkg_sects; u16 xlt1 = 0; u16 xlt2 = 0; u16 tcam = 0; u16 es = 0; + int status; u16 sects; /* count number of sections we need */ @@ -4871,7 +2996,7 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, /* Build update package buffer */ b = ice_pkg_buf_alloc(hw); if (!b) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_pkg_buf_reserve_section(b, sects); if (status) @@ -4908,13 +3033,13 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, */ pkg_sects = ice_pkg_buf_get_active_sections(b); if (!pkg_sects || pkg_sects != sects) { - status = ICE_ERR_INVAL_SIZE; + status = -EINVAL; goto error_tmp; } /* update package */ status = ice_update_pkg(hw, ice_pkg_buf(b), 1); - if (status == ICE_ERR_AQ_ERROR) + if (status == -EIO) ice_debug(hw, ICE_DBG_INIT, "Unable to update HW 
profile\n"); error_tmp: @@ -4953,12 +3078,18 @@ static const struct ice_fd_src_dst_pair ice_fd_pairs[] = { { ICE_PROT_IPV4_IL, 2, 12 }, { ICE_PROT_IPV4_IL, 2, 16 }, + { ICE_PROT_IPV4_IL_IL, 2, 12 }, + { ICE_PROT_IPV4_IL_IL, 2, 16 }, + { ICE_PROT_IPV6_OF_OR_S, 8, 8 }, { ICE_PROT_IPV6_OF_OR_S, 8, 24 }, { ICE_PROT_IPV6_IL, 8, 8 }, { ICE_PROT_IPV6_IL, 8, 24 }, + { ICE_PROT_IPV6_IL_IL, 8, 8 }, + { ICE_PROT_IPV6_IL_IL, 8, 24 }, + { ICE_PROT_TCP_IL, 1, 0 }, { ICE_PROT_TCP_IL, 1, 2 }, @@ -4980,7 +3111,7 @@ static const struct ice_fd_src_dst_pair ice_fd_pairs[] = { * @prof_id: profile ID * @es: extraction sequence (length of array is determined by the block) */ -static enum ice_status +static int ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) { DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT); @@ -5035,7 +3166,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) /* check for room */ if (first_free + 1 < (s8)ice_fd_pairs[index].count) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* place in extraction sequence */ for (k = 0; k < ice_fd_pairs[index].count; k++) { @@ -5045,7 +3176,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) ice_fd_pairs[index].off + (k * 2); if (k > first_free) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* keep track of non-relevant fields */ mask_sel |= BIT(first_free - k); @@ -5156,7 +3287,7 @@ ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type, * @attr: array of attributes that will be considered * @attr_cnt: number of elements in the attribute array */ -static enum ice_status +static int ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, const struct ice_ptype_attributes *attr, u16 attr_cnt) { @@ -5172,43 +3303,80 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, &prof->attr[prof->ptg_cnt]); if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; } } if (!found) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; return 0; } +/** + * ice_disable_fd_swap - set register appropriately to disable FD swap + * @hw: pointer to the HW struct + * @prof_id: profile ID + */ +static void ice_disable_fd_swap(struct ice_hw *hw, u16 prof_id) +{ + u8 swap_val = ICE_SWAP_VALID; + u8 i; + /* Since the SWAP Flag in the Programming Desc doesn't work, + * here add method to disable the SWAP Option via setting + * certain SWAP and INSET register set. 
+ */ + for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw / 4; i++) { + u32 raw_swap = 0; + u32 raw_in = 0; + u8 j; + + for (j = 0; j < 4; j++) { + raw_swap |= (swap_val++) << (j * BITS_PER_BYTE); + raw_in |= ICE_INSET_DFLT << (j * BITS_PER_BYTE); + } + + /* write the FDIR swap register set */ + wr32(hw, GLQF_FDSWAP(prof_id, i), raw_swap); + + ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n", + prof_id, i, GLQF_FDSWAP(prof_id, i), raw_swap); + + /* write the FDIR inset register set */ + wr32(hw, GLQF_FDINSET(prof_id, i), raw_in); + + ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n", + prof_id, i, GLQF_FDINSET(prof_id, i), raw_in); + } +} + /** * ice_add_prof - add profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID - * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits) + * @ptypes: bitmap indicating ptypes (ICE_FLOW_PTYPE_MAX bits) * @attr: array of attributes * @attr_cnt: number of elements in attrib array * @es: extraction sequence (length of array is determined by the block) * @masks: mask for extraction sequence + * @fd_swap: enable/disable FDIR paired src/dst fields swap option * * This function registers a profile, which matches a set of PTYPES with a * particular extraction sequence. While the hardware profile is allocated * it will not be written until the first call to ice_add_flow that specifies * the ID value used here. */ -enum ice_status -ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], - const struct ice_ptype_attributes *attr, u16 attr_cnt, - struct ice_fv_word *es, u16 *masks) +int +ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, + unsigned long *ptypes, const struct ice_ptype_attributes *attr, + u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool fd_swap) { - u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); struct ice_prof_map *prof; - enum ice_status status; - u8 byte = 0; + int status; u8 prof_id; + u16 ptype; bitmap_zero(ptgs_used, ICE_XLT1_CNT); @@ -5221,7 +3389,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], status = ice_alloc_prof_id(hw, blk, &prof_id); if (status) goto err_ice_add_prof; - if (blk == ICE_BLK_FD) { + if (blk == ICE_BLK_FD && fd_swap) { /* For Flow Director block, the extraction sequence may * need to be altered in the case where there are paired * fields that have no match. This is necessary because @@ -5232,6 +3400,8 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], status = ice_update_fd_swap(hw, prof_id, es); if (status) goto err_ice_add_prof; + } else if (blk == ICE_BLK_FD) { + ice_disable_fd_swap(hw, prof_id); } status = ice_update_prof_masking(hw, blk, prof_id, masks); if (status) @@ -5246,8 +3416,10 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], /* add profile info */ prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL); - if (!prof) + if (!prof) { + status = -ENOMEM; goto err_ice_add_prof; + } prof->profile_cookie = id; prof->prof_id = prof_id; @@ -5255,56 +3427,35 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], prof->context = 0; /* build list of ptgs */ - while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) { - u8 bit; + for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) { + u8 ptg; - if (!ptypes[byte]) { - bytes--; - byte++; + /* The package should place all ptypes in a non-zero + * PTG, so the following call should never fail. 
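/*
 * Illustrative aside, not part of the patch: ice_add_prof() now takes the
 * ptype set as a kernel bitmap (unsigned long *) instead of a u8 array and
 * walks it with for_each_set_bit(), as in the loop here. A minimal
 * caller-side sketch of building such a bitmap; the profile ID and the two
 * ptypes marked below are hypothetical placeholders, not values this patch
 * itself programs.
 */
#include <linux/bitmap.h>
#include "ice_flex_pipe.h"

static int example_add_prof(struct ice_hw *hw, struct ice_fv_word *es,
			    u16 *masks)
{
	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);

	bitmap_zero(ptypes, ICE_FLOW_PTYPE_MAX);
	set_bit(ICE_MAC_IPV4_L2TPV3, ptypes);	/* mark each relevant ptype */
	set_bit(ICE_MAC_IPV6_L2TPV3, ptypes);

	/* no ptype attributes, FD swap left enabled */
	return ice_add_prof(hw, ICE_BLK_FD, 0x1234ULL, ptypes,
			    NULL, 0, es, masks, true);
}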
+ */ + if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) continue; - } - /* Examine 8 bits per byte */ - for_each_set_bit(bit, (unsigned long *)&ptypes[byte], - BITS_PER_BYTE) { - u16 ptype; - u8 ptg; + /* If PTG is already added, skip and continue */ + if (test_bit(ptg, ptgs_used)) + continue; - ptype = byte * BITS_PER_BYTE + bit; + set_bit(ptg, ptgs_used); + /* Check to see there are any attributes for this ptype, and + * add them if found. + */ + status = ice_add_prof_attrib(prof, ptg, ptype, attr, attr_cnt); + if (status == -ENOSPC) + break; + if (status) { + /* This is simple a ptype/PTG with no attribute */ + prof->ptg[prof->ptg_cnt] = ptg; + prof->attr[prof->ptg_cnt].flags = 0; + prof->attr[prof->ptg_cnt].mask = 0; - /* The package should place all ptypes in a non-zero - * PTG, so the following call should never fail. - */ - if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) - continue; - - /* If PTG is already added, skip and continue */ - if (test_bit(ptg, ptgs_used)) - continue; - - set_bit(ptg, ptgs_used); - /* Check to see there are any attributes for this - * ptype, and add them if found. - */ - status = ice_add_prof_attrib(prof, ptg, ptype, attr, - attr_cnt); - if (status == ICE_ERR_MAX_LIMIT) + if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) break; - if (status) { - /* This is simple a ptype/PTG with no - * attribute - */ - prof->ptg[prof->ptg_cnt] = ptg; - prof->attr[prof->ptg_cnt].flags = 0; - prof->attr[prof->ptg_cnt].mask = 0; - - if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) - break; - } } - - bytes--; - byte++; } list_add(&prof->list, &hw->blk[blk].es.prof_map); @@ -5346,11 +3497,11 @@ ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) * @id: profile tracking ID * @cntxt: context */ -enum ice_status +int ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt) { - enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_prof_map *entry; + int status = -ENOENT; mutex_lock(&hw->blk[blk].es.prof_map_lock); entry = ice_search_prof_id(hw, blk, id); @@ -5369,11 +3520,11 @@ ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt) * @id: profile tracking ID * @cntxt: pointer to variable to receive the context */ -enum ice_status +int ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt) { - enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_prof_map *entry; + int status = -ENOENT; mutex_lock(&hw->blk[blk].es.prof_map_lock); entry = ice_search_prof_id(hw, blk, id); @@ -5410,14 +3561,14 @@ ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) * @blk: hardware block * @idx: the index to release */ -static enum ice_status +static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) { /* Masks to invoke a never match entry */ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; - enum ice_status status; + int status; /* write the TCAM entry */ status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk, @@ -5437,11 +3588,11 @@ ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) * @blk: hardware block * @prof: pointer to profile structure to remove */ -static enum ice_status +static int ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, struct ice_vsig_prof *prof) { - enum ice_status status; + int status; u16 i; for (i = 0; i < prof->tcam_count; i++) @@ -5450,7 +3601,7 @@ 
ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, status = ice_rel_tcam_idx(hw, blk, prof->tcam[i].tcam_idx); if (status) - return ICE_ERR_HW_TABLE; + return -EIO; } return 0; @@ -5463,19 +3614,20 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, * @vsig: the VSIG to remove * @chg: the change list */ -static enum ice_status +static int ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *chg) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_vsi *vsi_cur; struct ice_vsig_prof *d, *t; - enum ice_status status; /* remove TCAM entries */ list_for_each_entry_safe(d, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, list) { + int status; + status = ice_rem_prof_id(hw, blk, d); if (status) return status; @@ -5497,12 +3649,12 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; p->type = ICE_VSIG_REM; p->orig_vsig = vsig; p->vsig = ICE_DEFAULT_VSIG; - p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis; + p->vsi = (u16)(vsi_cur - hw->blk[blk].xlt2.vsis); list_add(&p->list_entry, chg); @@ -5520,18 +3672,19 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, * @hdl: profile handle indicating which profile to remove * @chg: list to receive a record of changes */ -static enum ice_status +static int ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, struct list_head *chg) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_prof *p, *t; - enum ice_status status; list_for_each_entry_safe(p, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, list) if (p->profile_cookie == hdl) { + int status; + if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) /* this is the last profile, remove the VSIG */ return ice_rem_vsig(hw, blk, vsig, chg); @@ -5544,7 +3697,7 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, return status; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -5553,12 +3706,12 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, * @blk: hardware block * @id: profile tracking ID */ -static enum ice_status +static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_chs_chg *del, *tmp; - enum ice_status status; struct list_head chg; + int status; u16 i; INIT_LIST_HEAD(&chg); @@ -5594,16 +3747,16 @@ err_ice_rem_flow_all: * previously created through ice_add_prof. If any existing entries * are associated with this profile, they will be removed as well. 
*/ -enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) +int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *pmap; - enum ice_status status; + int status; mutex_lock(&hw->blk[blk].es.prof_map_lock); pmap = ice_search_prof_id(hw, blk, id); if (!pmap) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_rem_prof; } @@ -5630,20 +3783,20 @@ err_ice_rem_prof: * @hdl: profile handle * @chg: change list */ -static enum ice_status +static int ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, struct list_head *chg) { - enum ice_status status = 0; struct ice_prof_map *map; struct ice_chs_chg *p; + int status = 0; u16 i; mutex_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); if (!map) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_get_prof; } @@ -5653,7 +3806,7 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_get_prof; } @@ -5686,7 +3839,7 @@ err_ice_get_prof: * * This routine makes a copy of the list of profiles in the specified VSIG. */ -static enum ice_status +static int ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *lst) { @@ -5714,7 +3867,7 @@ err_ice_get_profs_vsig: devm_kfree(ice_hw_to_dev(hw), ent1); } - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -5724,25 +3877,25 @@ err_ice_get_profs_vsig: * @lst: the list to be added to * @hdl: profile handle of entry to add */ -static enum ice_status +static int ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, struct list_head *lst, u64 hdl) { - enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *p; + int status = 0; u16 i; mutex_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, hdl); if (!map) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_add_prof_to_lst; } p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_prof_to_lst; } @@ -5772,17 +3925,17 @@ err_ice_add_prof_to_lst: * @vsig: the VSIG to move the VSI to * @chg: the change list */ -static enum ice_status +static int ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, struct list_head *chg) { - enum ice_status status; struct ice_chs_chg *p; u16 orig_vsig; + int status; p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); if (!status) @@ -5846,13 +3999,13 @@ ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg) * * This function appends an enable or disable TCAM entry in the change log */ -static enum ice_status +static int ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, u16 vsig, struct ice_tcam_inf *tcam, struct list_head *chg) { - enum ice_status status; struct ice_chs_chg *p; + int status; u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; @@ -5885,7 +4038,7 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, /* add TCAM to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* set 
don't care masks for TCAM flags */ ice_set_tcam_flags(tcam->attr.mask, dc_msk); @@ -5948,22 +4101,22 @@ ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, unsigned long *ptgs_used, * @vsig: the VSIG for which to adjust profile priorities * @chg: the change list */ -static enum ice_status +static int ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *chg) { DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); struct ice_tcam_inf **attr_used; - enum ice_status status = 0; struct ice_vsig_prof *t; u16 attr_used_cnt = 0; + int status = 0; u16 idx; #define ICE_MAX_PTG_ATTRS 1024 attr_used = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTG_ATTRS, sizeof(*attr_used), GFP_KERNEL); if (!attr_used) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; bitmap_zero(ptgs_used, ICE_XLT1_CNT); idx = vsig & ICE_VSIG_IDX_M; @@ -6036,7 +4189,7 @@ err_ice_adj_prof_priorities: * @rev: true to add entries to the end of the list * @chg: the change list */ -static enum ice_status +static int ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, bool rev, struct list_head *chg) { @@ -6044,26 +4197,26 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; - enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *t; struct ice_chs_chg *p; u16 vsig_idx, i; + int status = 0; /* Error, if this VSIG already has this profile */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; /* new VSIG profile structure */ t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL); if (!t) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mutex_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); if (!map) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_add_prof_id_vsig; } @@ -6078,7 +4231,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, /* add TCAM to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_prof_id_vsig; } @@ -6152,21 +4305,21 @@ err_ice_add_prof_id_vsig: * @hdl: the profile handle of the profile that will be added to the VSIG * @chg: the change list */ -static enum ice_status +static int ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, struct list_head *chg) { - enum ice_status status; struct ice_chs_chg *p; u16 new_vsig; + int status; p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; new_vsig = ice_vsig_alloc(hw, blk); if (!new_vsig) { - status = ICE_ERR_HW_TABLE; + status = -EIO; goto err_ice_create_prof_id_vsig; } @@ -6202,18 +4355,18 @@ err_ice_create_prof_id_vsig: * @new_vsig: return of new VSIG * @chg: the change list */ -static enum ice_status +static int ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, struct list_head *lst, u16 *new_vsig, struct list_head *chg) { struct ice_vsig_prof *t; - enum ice_status status; + int status; u16 vsig; vsig = ice_vsig_alloc(hw, blk); if (!vsig) - return ICE_ERR_HW_TABLE; + return -EIO; status = ice_move_vsi(hw, blk, vsi, vsig, chg); if (status) @@ -6243,8 +4396,8 @@ static bool 
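/*
 * Illustrative aside, not part of the patch: throughout these hunks the
 * driver-private enum ice_status return codes are replaced in place with
 * standard negative errno values. The correspondence used in this file,
 * collected here as a sketch for reference only (it assumes the pre-patch
 * enum ice_status definitions and is not code the patch adds):
 */
#include <linux/errno.h>

static int ice_status_to_errno_sketch(enum ice_status status)
{
	switch (status) {
	case ICE_ERR_PARAM:		return -EINVAL;
	case ICE_ERR_INVAL_SIZE:	return -EINVAL;
	case ICE_ERR_NO_MEMORY:	return -ENOMEM;
	case ICE_ERR_DOES_NOT_EXIST:	return -ENOENT;
	case ICE_ERR_ALREADY_EXISTS:	return -EEXIST;
	case ICE_ERR_MAX_LIMIT:	return -ENOSPC;
	case ICE_ERR_HW_TABLE:		return -EIO;
	case ICE_ERR_OUT_OF_RANGE:	return -EIO;
	case ICE_ERR_AQ_ERROR:		return -EIO;
	default:			return -EIO;
	}
}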
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) { struct ice_vsig_prof *t; - enum ice_status status; struct list_head lst; + int status; INIT_LIST_HEAD(&lst); @@ -6277,16 +4430,16 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) * save time in generating a new VSIG and TCAMs till a match is * found and subsequent rollback when a matching VSIG is found. */ -enum ice_status +int ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_chs_chg *tmp, *del; struct list_head chg; - enum ice_status status; + int status; /* if target VSIG is default the move is invalid */ if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG) - return ICE_ERR_PARAM; + return -EINVAL; INIT_LIST_HEAD(&chg); @@ -6315,14 +4468,14 @@ ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) * profile indicated by the ID parameter for the VSIs specified in the VSI * array. Once successfully called, the flow will be enabled. */ -enum ice_status +int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) { struct ice_vsig_prof *tmp1, *del1; struct ice_chs_chg *tmp, *del; struct list_head union_lst; - enum ice_status status; struct list_head chg; + int status; u16 vsig; INIT_LIST_HEAD(&union_lst); @@ -6348,7 +4501,7 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) * scenario */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) { - status = ICE_ERR_ALREADY_EXISTS; + status = -EEXIST; goto err_ice_add_prof_id_flow; } @@ -6450,6 +4603,55 @@ err_ice_add_prof_id_flow: return status; } +/** + * ice_flow_assoc_hw_prof - add profile id flow for main/ctrl VSI flow entry + * @hw: pointer to the HW struct + * @blk: HW block + * @dest_vsi_handle: dest VSI handle + * @fdir_vsi_handle: fdir programming VSI handle + * @id: profile id (handle) + * + * Calling this function will update the hardware tables to enable the + * profile indicated by the ID parameter for the VSIs specified in the VSI + * array. Once successfully called, the flow will be enabled. + */ +int +ice_flow_assoc_hw_prof(struct ice_hw *hw, enum ice_block blk, + u16 dest_vsi_handle, u16 fdir_vsi_handle, int id) +{ + int status = 0; + u16 vsi_num; + + vsi_num = ice_get_hw_vsi_num(hw, dest_vsi_handle); + status = ice_add_prof_id_flow(hw, blk, vsi_num, id); + if (status) { + ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed for main VSI flow entry, %d\n", + status); + goto err_add_prof; + } + + if (blk != ICE_BLK_FD) + return status; + + vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi_handle); + status = ice_add_prof_id_flow(hw, blk, vsi_num, id); + if (status) { + ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed for ctrl VSI flow entry, %d\n", + status); + goto err_add_entry; + } + + return status; + +err_add_entry: + vsi_num = ice_get_hw_vsi_num(hw, dest_vsi_handle); + ice_rem_prof_id_flow(hw, blk, vsi_num, id); +err_add_prof: + ice_flow_rem_prof(hw, blk, id); + + return status; +} + /** * ice_add_flow - add flow * @hw: pointer to the HW struct @@ -6462,14 +4664,15 @@ err_ice_add_prof_id_flow: * profile indicated by the ID parameter for the VSIs specified in the VSI * array. Once successfully called, the flow will be enabled. 
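/*
 * Illustrative aside, not part of the patch: the new ice_flow_assoc_hw_prof()
 * helper above adds the profile flow for the destination VSI and, for the FD
 * block only, also for the FDIR programming (control) VSI, unwinding both the
 * flow and the profile if the second association fails. A hypothetical
 * caller-side sketch; the VSI handles and profile ID are placeholders.
 */
static int example_assoc_fdir_prof(struct ice_hw *hw, u16 dest_vsi_handle,
				   u16 ctrl_vsi_handle, int prof_id)
{
	/* associates prof_id with both VSIs, or rolls everything back */
	return ice_flow_assoc_hw_prof(hw, ICE_BLK_FD, dest_vsi_handle,
				      ctrl_vsi_handle, prof_id);
}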
*/ -enum ice_status +int ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, u64 id) { - enum ice_status status; u16 i; for (i = 0; i < count; i++) { + int status; + status = ice_add_prof_id_flow(hw, blk, vsi[i], id); if (status) return status; @@ -6484,7 +4687,7 @@ ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, * @lst: list to remove the profile from * @hdl: the profile handle indicating the profile to remove */ -static enum ice_status +static int ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) { struct ice_vsig_prof *ent, *tmp; @@ -6496,7 +4699,7 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) return 0; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -6510,13 +4713,13 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) * profile indicated by the ID parameter for the VSIs specified in the VSI * array. Once successfully called, the flow will be disabled. */ -enum ice_status +int ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) { struct ice_vsig_prof *tmp1, *del1; struct ice_chs_chg *tmp, *del; struct list_head chg, copy; - enum ice_status status; + int status; u16 vsig; INIT_LIST_HEAD(©); @@ -6611,7 +4814,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) } } } else { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; } /* update hardware tables */ @@ -6644,14 +4847,15 @@ err_ice_rem_prof_id_flow: * using ice_add_flow. The ID value will indicated which profile will be * removed. Once successfully called, the flow will be disabled. */ -enum ice_status +int ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, u64 id) { - enum ice_status status; u16 i; for (i = 0; i < count; i++) { + int status; + status = ice_rem_prof_id_flow(hw, blk, vsi[i], id); if (status) return status; diff --git a/drivers/thirdparty/ice/ice_flex_pipe.h b/drivers/thirdparty/ice/ice_flex_pipe.h index 356d9f46f554..28c622c58961 100644 --- a/drivers/thirdparty/ice/ice_flex_pipe.h +++ b/drivers/thirdparty/ice/ice_flex_pipe.h @@ -6,25 +6,10 @@ #include "ice_type.h" -/* Package minimal version supported */ -#define ICE_PKG_SUPP_VER_MAJ 1 -#define ICE_PKG_SUPP_VER_MNR 3 - -/* Package format version */ -#define ICE_PKG_FMT_VER_MAJ 1 -#define ICE_PKG_FMT_VER_MNR 0 -#define ICE_PKG_FMT_VER_UPD 0 -#define ICE_PKG_FMT_VER_DFT 0 - -#define ICE_PKG_CNT 4 - -enum ice_status -ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); -void ice_release_change_lock(struct ice_hw *hw); -enum ice_status +int ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off); -enum ice_status +int ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, u16 *value); void @@ -32,81 +17,86 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type, unsigned long *bm); void ice_init_prof_result_bm(struct ice_hw *hw); -enum ice_status -ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, - unsigned long *bm, struct list_head *fv_list); -enum ice_status -ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); -u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld); -enum ice_status +int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, struct ice_sq_cd *cd); bool ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, u16 *port); -enum ice_status +int 
ice_is_create_tunnel_possible(struct ice_hw *hw, enum ice_tunnel_type type, u16 port); bool ice_is_tunnel_empty(struct ice_hw *hw); -enum ice_status +int ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port); -enum ice_status ice_set_dvm_boost_entries(struct ice_hw *hw); -enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all); +int ice_set_dvm_boost_entries(struct ice_hw *hw); +int ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all); bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index); bool ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type); -enum ice_status ice_replay_tunnels(struct ice_hw *hw); +int ice_replay_tunnels(struct ice_hw *hw); /* RX parser PType functions */ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype); /* XLT1/PType group functions */ -enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk); +int ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk); void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg); /* XLT2/VSI group functions */ -enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk); -enum ice_status -ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], - const struct ice_ptype_attributes *attr, u16 attr_cnt, - struct ice_fv_word *es, u16 *masks); +int ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk); +int +ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, + unsigned long *ptypes, const struct ice_ptype_attributes *attr, + u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool fd_swap); void ice_init_all_prof_masks(struct ice_hw *hw); void ice_shutdown_all_prof_masks(struct ice_hw *hw); struct ice_prof_map * ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id); -enum ice_status +int ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig); -enum ice_status +int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); -enum ice_status +int ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); -enum ice_status +int +ice_flow_assoc_hw_prof(struct ice_hw *hw, enum ice_block blk, + u16 dest_vsi_handle, u16 fdir_vsi_handle, int id); +int ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt); -enum ice_status +int ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt); -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); -enum ice_status -ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); -enum ice_status ice_init_hw_tbls(struct ice_hw *hw); -void ice_free_seg(struct ice_hw *hw); +int ice_init_hw_tbls(struct ice_hw *hw); void ice_fill_blk_tbls(struct ice_hw *hw); void ice_clear_hw_tbls(struct ice_hw *hw); void ice_free_hw_tbls(struct ice_hw *hw); -enum ice_status +int ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, u64 id); -enum ice_status +int ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, u64 id); -enum ice_status +int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); -struct ice_buf_build * -ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, - void **section); -struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld); -void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld); -enum ice_status +int ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, u16 len); + +void ice_fill_blk_tbls(struct ice_hw *hw); + +/* To 
support tunneling entries by PF, the package will append the PF number to + * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. + */ +#define ICE_TNL_PRE "TNL_" +/* For supporting double VLAN mode, it is necessary to enable or disable certain + * boost tcam entries. The metadata labels names that match the following + * prefixes will be saved to allow enabling double VLAN mode. + */ +#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */ +#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */ + +void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val); +void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable); + #endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/drivers/thirdparty/ice/ice_flex_type.h b/drivers/thirdparty/ice/ice_flex_type.h index 32bcb7704048..2744a330c67e 100644 --- a/drivers/thirdparty/ice/ice_flex_type.h +++ b/drivers/thirdparty/ice/ice_flex_type.h @@ -13,6 +13,7 @@ struct ice_fv_word { u8 resvrd; } __packed; + #define ICE_MAX_NUM_PROFILES 256 #define ICE_MAX_FV_WORDS 48 @@ -20,249 +21,6 @@ struct ice_fv { struct ice_fv_word ew[ICE_MAX_FV_WORDS]; }; -/* Package and segment headers and tables */ -struct ice_pkg_hdr { - struct ice_pkg_ver pkg_format_ver; - __le32 seg_count; - __le32 seg_offset[]; -}; - -/* generic segment */ -struct ice_generic_seg_hdr { -#define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_ICE 0x00000010 - __le32 seg_type; - struct ice_pkg_ver seg_format_ver; - __le32 seg_size; - char seg_id[ICE_PKG_NAME_SIZE]; -}; - -/* ice specific segment */ - -union ice_device_id { - struct { - __le16 device_id; - __le16 vendor_id; - } dev_vend_id; - __le32 id; -}; - -struct ice_device_id_entry { - union ice_device_id device; - union ice_device_id sub_device; -}; - -struct ice_seg { - struct ice_generic_seg_hdr hdr; - __le32 device_table_count; - struct ice_device_id_entry device_table[]; -}; - -struct ice_nvm_table { - __le32 table_count; - __le32 vers[]; -}; - -struct ice_buf { -#define ICE_PKG_BUF_SIZE 4096 - u8 buf[ICE_PKG_BUF_SIZE]; -}; - -struct ice_buf_table { - __le32 buf_count; - struct ice_buf buf_array[]; -}; - -/* global metadata specific segment */ -struct ice_global_metadata_seg { - struct ice_generic_seg_hdr hdr; - struct ice_pkg_ver pkg_ver; - __le32 rsvd; - char pkg_name[ICE_PKG_NAME_SIZE]; -}; - -#define ICE_MIN_S_OFF 12 -#define ICE_MAX_S_OFF 4095 -#define ICE_MIN_S_SZ 1 -#define ICE_MAX_S_SZ 4084 - -/* section information */ -struct ice_section_entry { - __le32 type; - __le16 offset; - __le16 size; -}; - -#define ICE_MIN_S_COUNT 1 -#define ICE_MAX_S_COUNT 511 -#define ICE_MIN_S_DATA_END 12 -#define ICE_MAX_S_DATA_END 4096 - -#define ICE_METADATA_BUF 0x80000000 - -struct ice_buf_hdr { - __le16 section_count; - __le16 data_end; - struct ice_section_entry section_entry[]; -}; - -#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ - struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\ - (ent_sz)) - -/* ice package section IDs */ -#define ICE_SID_METADATA 1 -#define ICE_SID_XLT0_SW 10 -#define ICE_SID_XLT_KEY_BUILDER_SW 11 -#define ICE_SID_XLT1_SW 12 -#define ICE_SID_XLT2_SW 13 -#define ICE_SID_PROFID_TCAM_SW 14 -#define ICE_SID_PROFID_REDIR_SW 15 -#define ICE_SID_FLD_VEC_SW 16 -#define ICE_SID_CDID_KEY_BUILDER_SW 17 -#define ICE_SID_CDID_REDIR_SW 18 - -#define ICE_SID_XLT0_ACL 20 -#define ICE_SID_XLT_KEY_BUILDER_ACL 21 -#define ICE_SID_XLT1_ACL 22 -#define ICE_SID_XLT2_ACL 23 -#define ICE_SID_PROFID_TCAM_ACL 24 -#define ICE_SID_PROFID_REDIR_ACL 25 
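/*
 * Illustrative aside, not part of the patch: the ICE_TNL_PRE, ICE_DVM_PRE and
 * ICE_SVM_PRE prefixes declared above are matched against boost TCAM label
 * names read from the package, and matching entries are handed to
 * ice_add_tunnel_hint() / ice_add_dvm_hint(). A rough sketch of that
 * classification, assuming the actual label walk lives elsewhere in the
 * driver:
 */
#include <linux/string.h>

static void example_classify_label(struct ice_hw *hw, char *label_name,
				   u16 val)
{
	if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
		ice_add_tunnel_hint(hw, label_name, val);
	else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
		ice_add_dvm_hint(hw, val, true);	/* enable for DVM */
	else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
		ice_add_dvm_hint(hw, val, false);	/* disable for SVM */
}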
-#define ICE_SID_FLD_VEC_ACL 26 -#define ICE_SID_CDID_KEY_BUILDER_ACL 27 -#define ICE_SID_CDID_REDIR_ACL 28 - -#define ICE_SID_XLT0_FD 30 -#define ICE_SID_XLT_KEY_BUILDER_FD 31 -#define ICE_SID_XLT1_FD 32 -#define ICE_SID_XLT2_FD 33 -#define ICE_SID_PROFID_TCAM_FD 34 -#define ICE_SID_PROFID_REDIR_FD 35 -#define ICE_SID_FLD_VEC_FD 36 -#define ICE_SID_CDID_KEY_BUILDER_FD 37 -#define ICE_SID_CDID_REDIR_FD 38 - -#define ICE_SID_XLT0_RSS 40 -#define ICE_SID_XLT_KEY_BUILDER_RSS 41 -#define ICE_SID_XLT1_RSS 42 -#define ICE_SID_XLT2_RSS 43 -#define ICE_SID_PROFID_TCAM_RSS 44 -#define ICE_SID_PROFID_REDIR_RSS 45 -#define ICE_SID_FLD_VEC_RSS 46 -#define ICE_SID_CDID_KEY_BUILDER_RSS 47 -#define ICE_SID_CDID_REDIR_RSS 48 - -#define ICE_SID_RXPARSER_CAM 50 -#define ICE_SID_RXPARSER_NOMATCH_CAM 51 -#define ICE_SID_RXPARSER_IMEM 52 -#define ICE_SID_RXPARSER_XLT0_BUILDER 53 -#define ICE_SID_RXPARSER_NODE_PTYPE 54 -#define ICE_SID_RXPARSER_MARKER_PTYPE 55 -#define ICE_SID_RXPARSER_BOOST_TCAM 56 -#define ICE_SID_RXPARSER_PROTO_GRP 57 -#define ICE_SID_RXPARSER_METADATA_INIT 58 -#define ICE_SID_RXPARSER_XLT0 59 - -#define ICE_SID_TXPARSER_CAM 60 -#define ICE_SID_TXPARSER_NOMATCH_CAM 61 -#define ICE_SID_TXPARSER_IMEM 62 -#define ICE_SID_TXPARSER_XLT0_BUILDER 63 -#define ICE_SID_TXPARSER_NODE_PTYPE 64 -#define ICE_SID_TXPARSER_MARKER_PTYPE 65 -#define ICE_SID_TXPARSER_BOOST_TCAM 66 -#define ICE_SID_TXPARSER_PROTO_GRP 67 -#define ICE_SID_TXPARSER_METADATA_INIT 68 -#define ICE_SID_TXPARSER_XLT0 69 - -#define ICE_SID_RXPARSER_INIT_REDIR 70 -#define ICE_SID_TXPARSER_INIT_REDIR 71 -#define ICE_SID_RXPARSER_MARKER_GRP 72 -#define ICE_SID_TXPARSER_MARKER_GRP 73 -#define ICE_SID_RXPARSER_LAST_PROTO 74 -#define ICE_SID_TXPARSER_LAST_PROTO 75 -#define ICE_SID_RXPARSER_PG_SPILL 76 -#define ICE_SID_TXPARSER_PG_SPILL 77 -#define ICE_SID_RXPARSER_NOMATCH_SPILL 78 -#define ICE_SID_TXPARSER_NOMATCH_SPILL 79 - -#define ICE_SID_XLT0_PE 80 -#define ICE_SID_XLT_KEY_BUILDER_PE 81 -#define ICE_SID_XLT1_PE 82 -#define ICE_SID_XLT2_PE 83 -#define ICE_SID_PROFID_TCAM_PE 84 -#define ICE_SID_PROFID_REDIR_PE 85 -#define ICE_SID_FLD_VEC_PE 86 -#define ICE_SID_CDID_KEY_BUILDER_PE 87 -#define ICE_SID_CDID_REDIR_PE 88 - -/* Label Metadata section IDs */ -#define ICE_SID_LBL_FIRST 0x80000010 -#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010 -#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011 -#define ICE_SID_LBL_RESERVED_12 0x80000012 -#define ICE_SID_LBL_RESERVED_13 0x80000013 -#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014 -#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015 -#define ICE_SID_LBL_PTYPE 0x80000016 -#define ICE_SID_LBL_PROTOCOL_ID 0x80000017 -#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 -#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019 -#define ICE_SID_LBL_RXPARSER_PG 0x8000001A -#define ICE_SID_LBL_TXPARSER_PG 0x8000001B -#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C -#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D -#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E -#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F -#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020 -#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021 -#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022 -#define ICE_SID_LBL_FLAG 0x80000023 -#define ICE_SID_LBL_REG 0x80000024 -#define ICE_SID_LBL_SW_PTG 0x80000025 -#define ICE_SID_LBL_ACL_PTG 0x80000026 -#define ICE_SID_LBL_PE_PTG 0x80000027 -#define ICE_SID_LBL_RSS_PTG 0x80000028 -#define ICE_SID_LBL_FD_PTG 0x80000029 -#define ICE_SID_LBL_SW_VSIG 0x8000002A -#define ICE_SID_LBL_ACL_VSIG 0x8000002B -#define ICE_SID_LBL_PE_VSIG 0x8000002C -#define 
ICE_SID_LBL_RSS_VSIG 0x8000002D -#define ICE_SID_LBL_FD_VSIG 0x8000002E -#define ICE_SID_LBL_PTYPE_META 0x8000002F -#define ICE_SID_LBL_SW_PROFID 0x80000030 -#define ICE_SID_LBL_ACL_PROFID 0x80000031 -#define ICE_SID_LBL_PE_PROFID 0x80000032 -#define ICE_SID_LBL_RSS_PROFID 0x80000033 -#define ICE_SID_LBL_FD_PROFID 0x80000034 -#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035 -#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036 -#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037 -#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038 -/* The following define MUST be updated to reflect the last label section ID */ -#define ICE_SID_LBL_LAST 0x80000038 - -enum ice_block { - ICE_BLK_SW = 0, - ICE_BLK_ACL, - ICE_BLK_FD, - ICE_BLK_RSS, - ICE_BLK_PE, - ICE_BLK_COUNT -}; - -enum ice_sect { - ICE_XLT0 = 0, - ICE_XLT_KB, - ICE_XLT1, - ICE_XLT2, - ICE_PROF_TCAM, - ICE_PROF_REDIR, - ICE_VEC_TBL, - ICE_CDID_KB, - ICE_CDID_REDIR, - ICE_SECT_COUNT -}; - /* Packet Type (PTYPE) values */ #define ICE_PTYPE_MAC_PAY 1 #define ICE_MAC_PTP 2 @@ -413,36 +171,84 @@ enum ice_sect { #define ICE_MAC_IPV6_PFCP_SESSION 354 #define ICE_MAC_IPV4_L2TPV3 360 #define ICE_MAC_IPV6_L2TPV3 361 -#define ICE_MAC_IPV4_L2TPV2_CONTROL 392 -#define ICE_MAC_IPV6_L2TPV2_CONTROL 393 -#define ICE_MAC_IPV4_L2TPV2 394 -#define ICE_MAC_IPV6_L2TPV2 395 -#define ICE_MAC_IPV4_PPPOL2TPV2 396 -#define ICE_MAC_IPV6_PPPOL2TPV2 397 -#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_FRAG 398 -#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_PAY 399 -#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_UDP_PAY 400 -#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_TCP 401 -#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_SCTP 402 -#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_ICMP 403 -#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_FRAG 404 -#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_PAY 405 -#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_UDP_PAY 406 -#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_TCP 407 -#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_SCTP 408 -#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_ICMPV6 409 -#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_FRAG 410 -#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_PAY 411 -#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_UDP_PAY 412 -#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_TCP 413 -#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_SCTP 414 -#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_ICMP 415 -#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_FRAG 416 -#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_PAY 417 -#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_UDP_PAY 418 -#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_TCP 419 -#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_SCTP 420 -#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_ICMPV6 421 +#define ICE_MAC_IPV4_L2TPV2_CONTROL 396 +#define ICE_MAC_IPV6_L2TPV2_CONTROL 397 +#define ICE_MAC_IPV4_L2TPV2 398 +#define ICE_MAC_IPV6_L2TPV2 399 +#define ICE_MAC_IPV4_PPPOL2TPV2 400 +#define ICE_MAC_IPV6_PPPOL2TPV2 401 +#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_FRAG 402 +#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_PAY 403 +#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_UDP_PAY 404 +#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_TCP 405 +#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_SCTP 406 +#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_ICMP 407 +#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_FRAG 408 +#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_PAY 409 +#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_UDP_PAY 410 +#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_TCP 411 +#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_SCTP 412 +#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_ICMPV6 413 +#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_FRAG 414 +#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_PAY 415 +#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_UDP_PAY 416 +#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_TCP 417 +#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_SCTP 418 +#define 
ICE_MAC_IPV6_PPPOL2TPV2_IPV4_ICMP 419 +#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_FRAG 420 +#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_PAY 421 +#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_UDP_PAY 422 +#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_TCP 423 +#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_SCTP 424 +#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_ICMPV6 425 +#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG 450 +#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY 451 +#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY 452 +#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP 453 +#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_SCTP 454 +#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP 455 +#define MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG 456 +#define MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY 457 +#define MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY 458 +#define MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP 459 +#define MAC_IPV4_TUN_IPV4_GTPU_IPV6_SCTP 460 +#define MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6 461 +#define MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG 462 +#define MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY 463 +#define MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY 464 +#define MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP 465 +#define MAC_IPV4_TUN_IPV6_GTPU_IPV4_SCTP 466 +#define MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP 467 +#define MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG 468 +#define MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY 469 +#define MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY 470 +#define MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP 471 +#define MAC_IPV4_TUN_IPV6_GTPU_IPV6_SCTP 472 +#define MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6 473 +#define MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG 474 +#define MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY 475 +#define MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY 476 +#define MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP 477 +#define MAC_IPV6_TUN_IPV4_GTPU_IPV4_SCTP 478 +#define MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP 479 +#define MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG 480 +#define MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY 481 +#define MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY 482 +#define MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP 483 +#define MAC_IPV6_TUN_IPV4_GTPU_IPV6_SCTP 484 +#define MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6 485 +#define MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG 486 +#define MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY 487 +#define MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY 488 +#define MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP 489 +#define MAC_IPV6_TUN_IPV6_GTPU_IPV4_SCTP 490 +#define MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP 491 +#define MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG 492 +#define MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY 493 +#define MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY 494 +#define MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP 495 +#define MAC_IPV6_TUN_IPV6_GTPU_IPV6_SCTP 496 +#define MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6 497 /* Attributes that can modify PTYPE definitions. * @@ -569,10 +375,18 @@ struct ice_sw_fv_list_entry { * fields of the packet are now little endian. 
*/ struct ice_boost_key_value { -#define ICE_BOOST_REMAINING_HV_KEY 15 +#define ICE_BOOST_REMAINING_HV_KEY 15 u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; - __le16 hv_dst_port_key; - __le16 hv_src_port_key; + union { + struct { + __le16 hv_dst_port_key; + __le16 hv_src_port_key; + } /* udp_tunnel */; + struct { + __le16 hv_vlan_id_key; + __le16 hv_etype_key; + } vlan; + }; u8 tcam_search_key; } __packed; @@ -605,24 +419,6 @@ struct ice_boost_tcam_section { sizeof(struct ice_boost_tcam_entry), \ sizeof(struct ice_boost_tcam_entry)) -/* package Marker PType TCAM entry */ -struct ice_marker_ptype_tcam_entry { -#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024 - __le16 addr; - __le16 ptype; - u8 keys[20]; -}; - -struct ice_marker_ptype_tcam_section { - __le16 count; - __le16 reserved; - struct ice_marker_ptype_tcam_entry tcam[]; -}; - -#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \ - sizeof(struct ice_marker_ptype_tcam_entry), \ - sizeof(struct ice_marker_ptype_tcam_entry)) - struct ice_xlt1_section { __le16 count; __le16 offset; @@ -641,34 +437,16 @@ struct ice_prof_redir_section { u8 redir_value[]; }; -/* package buffer building */ - -struct ice_buf_build { - struct ice_buf buf; - u16 reserved_section_table_entries; -}; - -struct ice_pkg_enum { - struct ice_buf_table *buf_table; - u32 buf_idx; - - u32 type; - struct ice_buf_hdr *buf; - u32 sect_idx; - void *sect; - u32 sect_type; - - u32 entry_idx; - void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); -}; - /* Tunnel enabling */ enum ice_tunnel_type { TNL_VXLAN = 0, TNL_GENEVE, - TNL_ECPRI, + TNL_GRETAP, TNL_GTP, + TNL_GTPC, + TNL_GTPU, + TNL_ECPRI, TNL_LAST = 0xFF, TNL_ALL = 0xFF, }; @@ -748,8 +526,8 @@ struct ice_ptg_ptype { u8 ptg; }; -#define ICE_MAX_TCAM_PER_PROFILE 32 -#define ICE_MAX_PTG_PER_PROFILE 32 +#define ICE_MAX_TCAM_PER_PROFILE 64 +#define ICE_MAX_PTG_PER_PROFILE 64 struct ice_prof_map { struct list_head list; @@ -942,11 +720,14 @@ struct ice_chs_chg { #define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT enum ice_prof_type { + ICE_PROF_INVALID = 0x0, ICE_PROF_NON_TUN = 0x1, ICE_PROF_TUN_UDP = 0x2, ICE_PROF_TUN_GRE = 0x4, - ICE_PROF_TUN_PPPOE = 0x8, - ICE_PROF_TUN_ALL = 0xE, + ICE_PROF_TUN_GTPU = 0x8, + ICE_PROF_TUN_GTPC = 0x10, + ICE_PROF_TUN_PPPOE = 0x20, + ICE_PROF_TUN_ALL = 0x3E, ICE_PROF_ALL = 0xFF, }; diff --git a/drivers/thirdparty/ice/ice_flg_rd.c b/drivers/thirdparty/ice/ice_flg_rd.c new file mode 100644 index 000000000000..c141ae14231a --- /dev/null +++ b/drivers/thirdparty/ice/ice_flg_rd.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#include "ice_common.h" +#include "ice_parser_util.h" + +#define ICE_FLG_RD_TABLE_SIZE 64 + +/** + * ice_flg_rd_dump - dump a flag redirect item info + * @hw: pointer to the hardware structure + * @item: flag redirect item to dump + */ +void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item) +{ + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); + dev_info(ice_hw_to_dev(hw), "expose = %d\n", item->expose); + dev_info(ice_hw_to_dev(hw), "intr_flg_id = %d\n", item->intr_flg_id); +} + +/** The function parses a 8 bits Flag Redirect Table entry with below format: + * BIT 0: Expose (rdi->expose) + * BIT 1-6: Internal Flag ID (rdi->intr_flg_id) + * BIT 7: reserved + */ +static void _flg_rd_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + struct ice_flg_rd_item *rdi = (struct ice_flg_rd_item *)item; + u8 d8 = *(u8 *)data; + + rdi->idx = idx; + rdi->expose = (d8 & 0x1) != 0; + rdi->intr_flg_id = (u8)((d8 >> 1) & 0x3f); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_flg_rd_dump(hw, rdi); +} + +/** + * ice_flg_rd_table_get - create a flag redirect table + * @hw: pointer to the hardware structure + */ +struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw) +{ + return (struct ice_flg_rd_item *) + ice_parser_create_table(hw, ICE_SID_RXPARSER_FLAG_REDIR, + sizeof(struct ice_flg_rd_item), + ICE_FLG_RD_TABLE_SIZE, + ice_parser_sect_item_get, + _flg_rd_parse_item, false); +} + +/** + * ice_flg_redirect - redirect a parser flag to packet flag + * @table: flag redirect table + * @psr_flg: parser flag to redirect + */ +u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg) +{ + u64 flg = 0; + int i; + + for (i = 0; i < 64; i++) { + struct ice_flg_rd_item *item = &table[i]; + + if (!item->expose) + continue; + + if (psr_flg & (1ul << item->intr_flg_id)) + flg |= (1ul << i); + } + + return flg; +} diff --git a/drivers/thirdparty/ice/ice_flg_rd.h b/drivers/thirdparty/ice/ice_flg_rd.h new file mode 100644 index 000000000000..a2513e3a80f9 --- /dev/null +++ b/drivers/thirdparty/ice/ice_flg_rd.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _ICE_FLG_RD_H_ +#define _ICE_FLG_RD_H_ + +struct ice_flg_rd_item { + u16 idx; + bool expose; + u8 intr_flg_id; +}; + +void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item); +struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw); +u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg); +#endif /* _ICE_FLG_RD_H_ */ diff --git a/drivers/thirdparty/ice/ice_flow.c b/drivers/thirdparty/ice/ice_flow.c index 74802e3d0754..b218d97516b3 100644 --- a/drivers/thirdparty/ice/ice_flow.c +++ b/drivers/thirdparty/ice/ice_flow.c @@ -3,7 +3,7 @@ #include "ice_common.h" #include "ice_flow.h" - +#include "ice_dcf.h" /* Size of known protocol header fields */ #define ICE_FLOW_FLD_SZ_ETH_TYPE 2 @@ -15,6 +15,10 @@ #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8 #define ICE_FLOW_FLD_SZ_IPV4_ID 2 #define ICE_FLOW_FLD_SZ_IPV6_ID 4 +#define ICE_FLOW_FLD_SZ_IP_CHKSUM 2 +#define ICE_FLOW_FLD_SZ_TCP_CHKSUM 2 +#define ICE_FLOW_FLD_SZ_UDP_CHKSUM 2 +#define ICE_FLOW_FLD_SZ_SCTP_CHKSUM 4 #define ICE_FLOW_FLD_SZ_IP_DSCP 1 #define ICE_FLOW_FLD_SZ_IP_TTL 1 #define ICE_FLOW_FLD_SZ_IP_PROT 1 @@ -34,6 +38,8 @@ #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4 #define ICE_FLOW_FLD_SZ_VXLAN_VNI 4 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2 +#define ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID 2 +#define ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID 2 /* Describe properties of a protocol header field */ struct ice_flow_field_info { @@ -98,6 +104,8 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR), /* ICE_FLOW_FIELD_IDX_IPV6_DA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM), /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4, ICE_FLOW_FLD_SZ_IPV4_ID), @@ -137,6 +145,13 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT), /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS), + /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM), + /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM), + /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8, + ICE_FLOW_FLD_SZ_SCTP_CHKSUM), /* ARP */ /* ICE_FLOW_FIELD_IDX_ARP_SIP */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR), @@ -172,9 +187,15 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, ICE_FLOW_FLD_SZ_GTP_TEID), + /* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22, + ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00), /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, ICE_FLOW_FLD_SZ_GTP_TEID), + /* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22, + ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00), /* PPPOE */ /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, @@ -210,6 +231,14 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12, ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID), + /* L2TPV2 */ + /* 
ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 12, + ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID), + /* L2TPV2_LEN */ + /* ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 14, + ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID), }; /* Bitmaps indicating relevant packet types for a particular protocol header @@ -220,7 +249,7 @@ static const u32 ice_ptypes_mac_ofos[] = { 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB, 0x0000077E, 0x000003FF, 0x00000000, 0x00000000, 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707, - 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000, + 0xFFFFF000, 0x000003FF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -239,28 +268,28 @@ static const u32 ice_ptypes_macvlan_il[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; -/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT - * include IPV4 other PTYPEs +/* Packet types for packets with an Outer/First/Single non-frag IPv4 header, + * does NOT include IPV4 other PTYPEs */ static const u32 ice_ptypes_ipv4_ofos[] = { - 0x1DC00000, 0x24000800, 0x00000000, 0x00000000, + 0x1D800000, 0xBFBF7800, 0x000001DF, 0x00000000, 0x00000000, 0x00000155, 0x00000000, 0x00000000, 0x00000000, 0x000FC000, 0x000002A0, 0x00100000, - 0x00001500, 0x00000000, 0x00000000, 0x00000000, + 0x00015000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; -/* Packet types for packets with an Outer/First/Single IPv4 header, includes - * IPV4 other PTYPEs +/* Packet types for packets with an Outer/First/Single non-frag IPv4 header, + * includes IPV4 other PTYPEs */ static const u32 ice_ptypes_ipv4_ofos_all[] = { - 0x1DC00000, 0x24000800, 0x00000000, 0x00000000, + 0x1D800000, 0x27BF7800, 0x00000000, 0x00000000, 0x00000000, 0x00000155, 0x00000000, 0x00000000, 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101, - 0x03FFD500, 0x00000000, 0x00000000, 0x00000000, + 0x3FFD5000, 0x00000000, 0x02FBEFBC, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -272,35 +301,35 @@ static const u32 ice_ptypes_ipv4_il[] = { 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B, 0x0000000E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x001FF800, 0x00100000, - 0xFC0FC000, 0x00000000, 0x00000000, 0x00000000, + 0xC0FC0000, 0x0000000F, 0xBC0BC0BC, 0x00000BC0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; -/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT - * include IVP6 other PTYPEs +/* Packet types for packets with an Outer/First/Single non-frag IPv6 header, + * does NOT include IVP6 other PTYPEs */ static const u32 ice_ptypes_ipv6_ofos[] = { - 0x00000000, 0x00000000, 0x77000000, 0x10002000, + 0x00000000, 0x00000000, 0x76000000, 0x10002000, 0x00000000, 0x000002AA, 0x00000000, 0x00000000, 0x00000000, 0x03F00000, 0x00000540, 0x00000000, - 0x00002A00, 0x00000000, 0x00000000, 0x00000000, + 0x0002A000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
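/*
 * Illustrative aside, not part of the patch: each ice_ptypes_* table in this
 * area is a 1024-bit map stored as 32 u32 words, where bit (ptype % 32) of
 * word (ptype / 32) marks that PTYPE as relevant for the protocol header in
 * question. A minimal standalone check, assuming that layout:
 */
#include <linux/types.h>

static bool example_ptype_in_bitmap(const u32 *bm, u16 ptype)
{
	return (bm[ptype / 32] >> (ptype % 32)) & 1;
}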
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; -/* Packet types for packets with an Outer/First/Single IPv6 header, includes - * IPV6 other PTYPEs +/* Packet types for packets with an Outer/First/Single non-frag IPv6 header, + * includes IPV6 other PTYPEs */ static const u32 ice_ptypes_ipv6_ofos_all[] = { - 0x00000000, 0x00000000, 0x77000000, 0x10002000, - 0x00000000, 0x000002AA, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x76000000, 0xFEFDE000, + 0x0000077E, 0x000002AA, 0x00000000, 0x00000000, 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206, - 0xFC002A00, 0x0000003F, 0x00000000, 0x00000000, + 0xC002A000, 0x000003FF, 0xBC000000, 0x0002FBEF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -312,16 +341,18 @@ static const u32 ice_ptypes_ipv6_il[] = { 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000, 0x00000770, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x7FE00000, 0x00000000, - 0x03F00000, 0x0000003F, 0x00000000, 0x00000000, + 0x3F000000, 0x000003F0, 0x02F02F00, 0x0002F02F, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; -/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */ +/* Packet types for packets with an Outer/First/Single + * non-frag IPv4 header - no L4 + */ static const u32 ice_ptypes_ipv4_ofos_no_l4[] = { - 0x10C00000, 0x04000800, 0x00000000, 0x00000000, + 0x10800000, 0x04000800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -343,9 +374,11 @@ static const u32 ice_ptypes_ipv4_il_no_l4[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; -/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */ +/* Packet types for packets with an Outer/First/Single + * non-frag IPv6 header - no L4 + */ static const u32 ice_ptypes_ipv6_ofos_no_l4[] = { - 0x00000000, 0x00000000, 0x43000000, 0x10002000, + 0x00000000, 0x00000000, 0x42000000, 0x10002000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -386,7 +419,7 @@ static const u32 ice_ptypes_udp_il[] = { 0x81000000, 0x20204040, 0x04000010, 0x80810102, 0x00000040, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00410000, 0x908427E0, 0x00100007, - 0x10410000, 0x00000004, 0x00000000, 0x00000000, + 0x0413F000, 0x00000041, 0x10410410, 0x00004104, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -398,7 +431,7 @@ static const u32 ice_ptypes_tcp_il[] = { 0x04000000, 0x80810102, 0x10000040, 0x02040408, 0x00000102, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00820000, 0x21084000, 0x00000000, - 0x20820000, 0x00000008, 0x00000000, 0x00000000, + 0x08200000, 0x00000082, 0x20820820, 0x00008208, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -410,7 +443,7 @@ static const u32 ice_ptypes_sctp_il[] = { 0x08000000, 0x01020204, 0x20000081, 0x04080810, 0x00000204, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01040000, 0x00000000, 0x00000000, - 0x41040000, 0x00000010, 0x00000000, 
0x00000000, + 0x10400000, 0x00000104, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -434,7 +467,7 @@ static const u32 ice_ptypes_icmp_il[] = { 0x00000000, 0x02040408, 0x40000102, 0x08101020, 0x00000408, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x42108000, 0x00000000, - 0x82080000, 0x00000020, 0x00000000, 0x00000000, + 0x20800000, 0x00000208, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -446,7 +479,7 @@ static const u32 ice_ptypes_gre_of[] = { 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000, 0x0000017E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -523,6 +556,46 @@ static const struct ice_ptype_attributes ice_attr_gtpu_session[] = { { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, 
ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION }, }; static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = { @@ -546,6 +619,46 @@ static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = { { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { 
MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, }; static const struct ice_ptype_attributes ice_attr_gtpu_down[] = { @@ -569,6 +682,46 @@ static const struct ice_ptype_attributes ice_attr_gtpu_down[] = { { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, }; static const struct ice_ptype_attributes ice_attr_gtpu_up[] = { @@ -592,13 +745,53 @@ static const struct ice_ptype_attributes ice_attr_gtpu_up[] = { { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, + { 
MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, }; static const u32 ice_ptypes_gtpu[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x0000003F, 0xBEFBEFBC, 0x0002FBEF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -737,7 +930,7 @@ static const u32 ice_ptypes_l2tpv2[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000, + 0xFFFFF000, 0x000003FF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, @@ -748,7 +941,7 @@ static const u32 ice_ptypes_ppp[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000, + 0xFFFF0000, 0x000003FF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -802,6 +995,7 @@ struct ice_flow_prof_params { ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \ ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \ ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \ + ICE_FLOW_SEG_HDR_VXLAN | ICE_FLOW_SEG_HDR_GRE | \ ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \ ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP) @@ -820,7 +1014,7 @@ struct ice_flow_prof_params { * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided */ -static enum ice_status +static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) { u8 i; @@ -829,12 +1023,12 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) /* Multiple L3 headers */ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) - return ICE_ERR_PARAM; + return -EINVAL; /* Multiple L4 headers */ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) - return ICE_ERR_PARAM; + return -EINVAL; } return 0; @@ -895,7 +1089,7 @@ static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg) * This function identifies the packet types associated with the protocol * headers being present in packet segments of the specified flow profile. */ -static enum ice_status +static int ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) { struct ice_flow_prof *prof; @@ -1017,11 +1211,9 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) bitmap_and(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) { - if (!i) { - src = (const unsigned long *)ice_ptypes_gre_of; - bitmap_and(params->ptypes, params->ptypes, - src, ICE_FLOW_PTYPE_MAX); - } + src = (const unsigned long *)ice_ptypes_gre_of; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) { src = (const unsigned long *)ice_ptypes_gtpc; bitmap_and(params->ptypes, params->ptypes, src, @@ -1135,19 +1327,19 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) * This function will allocate an extraction sequence entries for a DWORD size * chunk of the packet flags. */ -static enum ice_status +static int ice_flow_xtract_pkt_flags(struct ice_hw *hw, struct ice_flow_prof_params *params, enum ice_flex_mdid_pkt_flags flags) { - u8 fv_words = hw->blk[params->blk].es.fvw; + u8 fv_words = (u8)hw->blk[params->blk].es.fvw; u8 idx; /* Make sure the number of extraction sequence entries required does not * exceed the block's capacity. 
*/ if (params->es_cnt >= fv_words) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* some blocks require a reversed field vector layout */ if (hw->blk[params->blk].es.reverse) @@ -1156,7 +1348,7 @@ ice_flow_xtract_pkt_flags(struct ice_hw *hw, idx = params->es_cnt; params->es[idx].prot_id = ICE_PROT_META_ID; - params->es[idx].off = flags; + params->es[idx].off = (u16)flags; params->es_cnt++; return 0; @@ -1174,13 +1366,13 @@ ice_flow_xtract_pkt_flags(struct ice_hw *hw, * field. It then allocates one or more extraction sequence entries for the * given field, and fill the entries with protocol ID and offset information. */ -static enum ice_status +static int ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, u8 seg, enum ice_flow_field fld, u64 match) { enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX; + u8 fv_words = (u8)hw->blk[params->blk].es.fvw; enum ice_prot_id prot_id = ICE_PROT_ID_INVAL; - u8 fv_words = hw->blk[params->blk].es.fvw; struct ice_flow_fld_info *flds; u16 cnt, ese_bits, i; u16 sib_mask = 0; @@ -1208,7 +1400,10 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, case ICE_FLOW_FIELD_IDX_IPV4_TTL: case ICE_FLOW_FIELD_IDX_IPV4_PROT: prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; - + if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE && + params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU && + seg == 1) + prot_id = ICE_PROT_IPV4_IL_IL; /* TTL and PROT share the same extraction seq. entry. * Each is considered a sibling to the other in terms of sharing * the same extraction sequence entry. @@ -1227,7 +1422,10 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, case ICE_FLOW_FIELD_IDX_IPV6_TTL: case ICE_FLOW_FIELD_IDX_IPV6_PROT: prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; - + if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE && + params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU && + seg == 1) + prot_id = ICE_PROT_IPV6_IL_IL; /* TTL and PROT share the same extraction seq. entry. * Each is considered a sibling to the other in terms of sharing * the same extraction sequence entry. @@ -1245,7 +1443,12 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, break; case ICE_FLOW_FIELD_IDX_IPV4_SA: case ICE_FLOW_FIELD_IDX_IPV4_DA: + case ICE_FLOW_FIELD_IDX_IPV4_CHKSUM: prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; + if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE && + params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU && + seg == 1) + prot_id = ICE_PROT_IPV4_IL_IL; break; case ICE_FLOW_FIELD_IDX_IPV4_ID: prot_id = ICE_PROT_IPV4_OF_OR_S; @@ -1259,6 +1462,10 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA: case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA: prot_id = seg == 0 ? 
ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; + if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE && + params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU && + seg == 1) + prot_id = ICE_PROT_IPV6_IL_IL; break; case ICE_FLOW_FIELD_IDX_IPV6_ID: prot_id = ICE_PROT_IPV6_FRAG; @@ -1266,14 +1473,17 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT: case ICE_FLOW_FIELD_IDX_TCP_DST_PORT: case ICE_FLOW_FIELD_IDX_TCP_FLAGS: + case ICE_FLOW_FIELD_IDX_TCP_CHKSUM: prot_id = ICE_PROT_TCP_IL; break; case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT: case ICE_FLOW_FIELD_IDX_UDP_DST_PORT: + case ICE_FLOW_FIELD_IDX_UDP_CHKSUM: prot_id = ICE_PROT_UDP_IL_OR_S; break; case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT: case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT: + case ICE_FLOW_FIELD_IDX_SCTP_CHKSUM: prot_id = ICE_PROT_SCTP_IL; break; case ICE_FLOW_FIELD_IDX_VXLAN_VNI: @@ -1283,6 +1493,10 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID: case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID: case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI: + case ICE_FLOW_FIELD_IDX_GTPU_UP_QFI: + case ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI: + case ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID: + case ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID: /* GTP is accessed through UDP OF protocol */ prot_id = ICE_PROT_UDP_OF; break; @@ -1331,7 +1545,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, prot_id = ICE_PROT_GRE_OF; break; default: - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; } /* Each extraction sequence entry is a word in size, and extracts a @@ -1339,7 +1553,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, */ ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE; - flds[fld].xtrct.prot_id = prot_id; + flds[fld].xtrct.prot_id = (u8)prot_id; flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) * ICE_FLOW_FV_EXTRACT_SZ; flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits); @@ -1369,7 +1583,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, * does not exceed the block's capability */ if (params->es_cnt >= fv_words) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* some blocks require a reversed field vector layout */ if (hw->blk[params->blk].es.reverse) @@ -1377,7 +1591,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, else idx = params->es_cnt; - params->es[idx].prot_id = prot_id; + params->es[idx].prot_id = (u8)prot_id; params->es[idx].off = off; params->mask[idx] = mask | sib_mask; params->es_cnt++; @@ -1395,7 +1609,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, * @params: information about the flow to be processed * @seg: index of packet segment whose raw fields are to be extracted */ -static enum ice_status +static int ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, u8 seg) { @@ -1408,12 +1622,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, if (params->prof->segs[seg].raws_cnt > ARRAY_SIZE(params->prof->segs[seg].raws)) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* Offsets within the segment headers are not supported */ hdrs_sz = ice_flow_calc_seg_sz(params, seg); if (!hdrs_sz) - return ICE_ERR_PARAM; + return -EINVAL; fv_words = hw->blk[params->blk].es.fvw; @@ -1445,7 +1659,7 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, */ if (params->es_cnt >= hw->blk[params->blk].es.count || params->es_cnt >= 
ICE_MAX_FV_WORDS) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* some blocks require a reversed field vector layout */ if (hw->blk[params->blk].es.reverse) @@ -1471,11 +1685,11 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, * This function iterates through all matched fields in the given segments, and * creates an extraction sequence for the fields. */ -static enum ice_status +static int ice_flow_create_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof_params *params) { - enum ice_status status = 0; + int status = 0; u8 i; /* For ACL, we also need to extract the direction bit (Rx,Tx) data from @@ -1517,14 +1731,14 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw, * This function will return the specific scenario based on the * params passed to it */ -static enum ice_status +static int ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params) { /* Find the best-fit scenario for the provided match width */ struct ice_acl_scen *cand_scen = NULL, *scen; if (!hw->acl_tbl) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Loop through each scenario and match against the scenario width * to select the specific scenario @@ -1534,7 +1748,7 @@ ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params) (!cand_scen || cand_scen->eff_width > scen->eff_width)) cand_scen = scen; if (!cand_scen) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; params->prof->cfg.scen = cand_scen; @@ -1545,7 +1759,7 @@ ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params) * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries * @params: information about the flow to be processed */ -static enum ice_status +static int ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params) { u16 index, i, range_idx = 0; @@ -1554,10 +1768,10 @@ ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params) for (i = 0; i < params->prof->segs_cnt; i++) { struct ice_flow_seg_info *seg = ¶ms->prof->segs[i]; - u8 j; + u16 j; for_each_set_bit(j, (unsigned long *)&seg->match, - ICE_FLOW_FIELD_IDX_MAX) { + (u16)ICE_FLOW_FIELD_IDX_MAX) { struct ice_flow_fld_info *fld = &seg->fields[j]; fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL; @@ -1569,12 +1783,12 @@ ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params) * words */ if (DIV_ROUND_UP(ice_flds_info[j].size + fld->xtrct.disp, BITS_PER_BYTE * 2) > 1) - return ICE_ERR_PARAM; + return -EINVAL; /* Ranges must define low and high values */ if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL || fld->src.last == ICE_FLOW_FLD_OFF_INVAL) - return ICE_ERR_PARAM; + return -EINVAL; fld->entry.val = range_idx++; } else { @@ -1604,13 +1818,13 @@ ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params) * larger. 
*/ if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS) - return ICE_ERR_PARAM; + return -EINVAL; /* Only 8 range checkers per profile, reject anything trying to use * more */ if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG) - return ICE_ERR_PARAM; + return -EINVAL; /* Store # bytes required for entry for later use */ params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX; @@ -1623,10 +1837,10 @@ ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params) * @hw: pointer to the HW struct * @params: information about the flow to be processed */ -static enum ice_status +static int ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) { - enum ice_status status; + int status; status = ice_flow_proc_seg_hdrs(params); if (status) @@ -1650,7 +1864,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) return status; break; default: - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; } return status; @@ -1782,12 +1996,12 @@ ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry) * @prof_id: the profile ID handle * @hw_prof_id: pointer to variable to receive the HW profile ID */ -enum ice_status +int ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u8 *hw_prof_id) { - enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_prof_map *map; + int status = -ENOENT; mutex_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, prof_id); @@ -1807,16 +2021,16 @@ ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id, * @prof: pointer to flow profile * @buf: destination buffer function writes partial extraction sequence to * - * returns ICE_SUCCESS if no PF is associated to the given profile - * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile + * returns 0 if no PF is associated to the given profile + * returns -EBUSY if at least one PF is associated to the given profile * returns other error code for real error */ -static enum ice_status +static int ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof, struct ice_aqc_acl_prof_generic_frmt *buf) { - enum ice_status status; u8 prof_id = 0; + int status; status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id); if (status) @@ -1846,7 +2060,7 @@ ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof, buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN) return 0; - return ICE_ERR_IN_USE; + return -EBUSY; } /** @@ -1855,7 +2069,7 @@ ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof, * @acts: array of actions to be performed on a match * @acts_cnt: number of actions */ -static enum ice_status +static int ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts, u8 acts_cnt) { @@ -1866,7 +2080,7 @@ ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts, acts[i].type == ICE_FLOW_ACT_CNTR_BYTES || acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) { struct ice_acl_cntrs cntrs = { 0 }; - enum ice_status status; + int status; /* amount is unused in the dealloc path but the common * parameter check routine wants a value set, as zero @@ -1899,11 +2113,11 @@ ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts, * * Disassociate the scenario from the profile for the PF of the VSI. 
*/ -static enum ice_status +static int ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof) { struct ice_aqc_acl_prof_generic_frmt buf; - enum ice_status status = 0; + int status = 0; u8 prof_id = 0; memset(&buf, 0, sizeof(buf)); @@ -1929,20 +2143,20 @@ ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof) * @blk: classification stage * @entry: flow entry to be removed */ -static enum ice_status +static int ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk, struct ice_flow_entry *entry) { if (!entry) - return ICE_ERR_BAD_PTR; + return -EINVAL; if (blk == ICE_BLK_ACL) { - enum ice_status status; + int status; if (ice_dcf_is_acl_capable(hw)) - return ICE_ERR_IN_USE; + return -EBUSY; if (!entry->prof) - return ICE_ERR_BAD_PTR; + return -EINVAL; status = ice_acl_rem_entry(hw, entry->prof->cfg.scen, entry->scen_entry_idx); @@ -1976,7 +2190,7 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk, * * Assumption: the caller has acquired the lock to the profile list */ -static enum ice_status +static int ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, @@ -1984,20 +2198,20 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof **prof) { struct ice_flow_prof_params *params; - enum ice_status status; + int status; u8 i; if (!prof || (acts_cnt && !acts)) - return ICE_ERR_BAD_PTR; + return -EINVAL; params = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params), GFP_KERNEL); if (!params) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof), GFP_KERNEL); if (!params->prof) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto free_params; } @@ -2027,12 +2241,11 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, GFP_KERNEL); if (!params->prof->acts) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto out; } } - status = ice_flow_proc_segs(hw, params); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n"); @@ -2040,9 +2253,9 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, } /* Add a HW profile for this flow profile */ - status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, + status = ice_add_prof(hw, blk, prof_id, params->ptypes, params->attr, params->attr_cnt, params->es, - params->mask); + params->mask, true); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; @@ -2072,11 +2285,11 @@ free_params: * * Assumption: the caller has acquired the lock to the profile list */ -static enum ice_status +static int ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof) { - enum ice_status status; + int status; /* Remove all remaining flow entries before removing the flow profile */ if (!list_empty(&prof->entries)) { @@ -2107,7 +2320,7 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, * used by any PF */ status = ice_flow_acl_is_prof_in_use(hw, prof, &buf); - if (status && status != ICE_ERR_IN_USE) { + if (status && status != -EBUSY) { return status; } else if (!status) { /* Clear the range-checker value for profile ID */ @@ -2166,13 +2379,13 @@ ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf, * @hw: pointer to the hardware structure * @prof: pointer to flow profile */ -static enum ice_status +static int ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof) { struct 
ice_aqc_acl_prof_generic_frmt buf; struct ice_flow_fld_info *info; - enum ice_status status; u8 prof_id = 0; + int status; u16 i; memset(&buf, 0, sizeof(buf)); @@ -2182,7 +2395,7 @@ ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof) return status; status = ice_flow_acl_is_prof_in_use(hw, prof, &buf); - if (status && status != ICE_ERR_IN_USE) + if (status && status != -EBUSY) return status; if (!status) { @@ -2235,14 +2448,14 @@ ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof) * be added has the same characteristics as the VSIG and will * thereby have access to all resources added to that VSIG. */ -enum ice_status +int ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle, u16 vsig) { - enum ice_status status; + int status; if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->fl_profs_locks[blk]); status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle), @@ -2262,11 +2475,11 @@ ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle, * Assumption: the caller has acquired the lock to the profile list * and the software VSI handle has been validated */ -static enum ice_status +static int ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof, u16 vsi_handle) { - enum ice_status status = 0; + int status = 0; if (!test_bit(vsi_handle, prof->vsis)) { if (blk == ICE_BLK_ACL) { @@ -2298,11 +2511,11 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, * Assumption: the caller has acquired the lock to the profile list * and the software VSI handle has been validated */ -static enum ice_status +static int ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof, u16 vsi_handle) { - enum ice_status status = 0; + int status = 0; if (test_bit(vsi_handle, prof->vsis)) { status = ice_rem_prof_id_flow(hw, blk, @@ -2319,6 +2532,90 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, return status; } +#define FLAG_GTP_EH_PDU_LINK BIT_ULL(13) +#define FLAG_GTP_EH_PDU BIT_ULL(14) + +#define FLAG_GTPU_MSK \ + (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK) +#define FLAG_GTPU_UP \ + (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK) +#define FLAG_GTPU_DW \ + (FLAG_GTP_EH_PDU) +/** + * ice_flow_set_hw_prof - Set HW flow profile based on the parsed profile info + * @hw: pointer to the HW struct + * @dest_vsi_handle: dest VSI handle + * @fdir_vsi_handle: fdir programming VSI handle + * @prof: stores parsed profile info from raw flow + * @blk: classification stage + */ +int +ice_flow_set_hw_prof(struct ice_hw *hw, u16 dest_vsi_handle, + u16 fdir_vsi_handle, struct ice_parser_profile *prof, + enum ice_block blk) +{ + int id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX); + struct ice_flow_prof_params *params; + u8 fv_words = hw->blk[blk].es.fvw; + int status; + int i, idx; + + params = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params), GFP_KERNEL); + if (!params) + return -ENOMEM; + + for (i = 0; i < ICE_MAX_FV_WORDS; i++) { + params->es[i].prot_id = ICE_PROT_INVALID; + params->es[i].off = ICE_FV_OFFSET_INVAL; + } + + for (i = 0; i < prof->fv_num; i++) { + if (hw->blk[blk].es.reverse) + idx = fv_words - i - 1; + else + idx = i; + params->es[idx].prot_id = prof->fv[i].proto_id; + params->es[idx].off = prof->fv[i].offset; + params->mask[idx] = (((prof->fv[i].msk) << 8) & 0xff00) | + (((prof->fv[i].msk) >> 8) & 0x00ff); + } + + switch (prof->flags) { + case FLAG_GTPU_DW: + 
params->attr = ice_attr_gtpu_down; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down); + break; + case FLAG_GTPU_UP: + params->attr = ice_attr_gtpu_up; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up); + break; + default: + if (prof->flags_msk & FLAG_GTPU_MSK) { + params->attr = ice_attr_gtpu_session; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session); + } + break; + } + + status = ice_add_prof(hw, blk, id, prof->ptypes, + params->attr, params->attr_cnt, + params->es, params->mask, false); + if (status) + goto free_params; + + status = ice_flow_assoc_hw_prof(hw, blk, dest_vsi_handle, + fdir_vsi_handle, id); + if (status) + goto free_params; + + return 0; + +free_params: + devm_kfree(ice_hw_to_dev(hw), params); + + return status; +} + /** * ice_flow_add_prof - Add a flow profile for packet segments and matched fields * @hw: pointer to the HW struct @@ -2331,22 +2628,22 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, * @acts_cnt: number of default actions * @prof: stores the returned flow profile added */ -enum ice_status +int ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_action *acts, u8 acts_cnt, struct ice_flow_prof **prof) { - enum ice_status status; + int status; if (segs_cnt > ICE_FLOW_SEG_MAX) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; if (!segs_cnt) - return ICE_ERR_PARAM; + return -EINVAL; if (!segs) - return ICE_ERR_BAD_PTR; + return -EINVAL; status = ice_flow_val_hdrs(segs, segs_cnt); if (status) @@ -2370,17 +2667,17 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, * @blk: the block for which the flow profile is to be removed * @prof_id: unique ID of the flow profile to be removed */ -enum ice_status +int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id) { struct ice_flow_prof *prof; - enum ice_status status; + int status; mutex_lock(&hw->fl_profs_locks[blk]); prof = ice_flow_find_prof_id(hw, blk, prof_id); if (!prof) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto out; } @@ -2438,7 +2735,7 @@ u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id) * @acts_cnt: number of actions * @cnt_alloc: indicates if an ACL counter has been allocated. */ -static enum ice_status +static int ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts, u8 acts_cnt, bool *cnt_alloc) { @@ -2449,20 +2746,20 @@ ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts, *cnt_alloc = false; if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; for (i = 0; i < acts_cnt; i++) { if (acts[i].type != ICE_FLOW_ACT_NOP && acts[i].type != ICE_FLOW_ACT_DROP && acts[i].type != ICE_FLOW_ACT_CNTR_PKT && acts[i].type != ICE_FLOW_ACT_FWD_QUEUE) - return ICE_ERR_CFG; + return -EIO; /* If the caller want to add two actions of the same type, then * it is considered invalid configuration. */ - if (test_and_set_bit(acts[i].type, dup_check)) - return ICE_ERR_PARAM; + if (test_and_set_bit((u16)acts[i].type, dup_check)) + return -EINVAL; } /* Checks if ACL counters are needed. 
*/ @@ -2471,7 +2768,7 @@ ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts, acts[i].type == ICE_FLOW_ACT_CNTR_BYTES || acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) { struct ice_acl_cntrs cntrs = { 0 }; - enum ice_status status; + int status; cntrs.amount = 1; cntrs.bank = 0; /* Only bank0 for the moment */ @@ -2522,7 +2819,7 @@ ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info, (*(u16 *)(data + info->src.last)) << info->xtrct.disp; u16 new_low = (*(u16 *)(data + info->src.val)) << info->xtrct.disp; - u8 range_idx = info->entry.val; + u8 range_idx = (u8)info->entry.val; range_buf->checker_cfg[range_idx].low_boundary = cpu_to_be16(new_low); @@ -2620,17 +2917,17 @@ ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf, * along with data from the flow profile. This key/key_inverse pair makes up * the 'entry' for an ACL flow entry. */ -static enum ice_status +static int ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof, struct ice_flow_entry *e, u8 *data, struct ice_flow_action *acts, u8 acts_cnt) { u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk; struct ice_aqc_acl_profile_ranges *range_buf = NULL; - enum ice_status status; bool cnt_alloc; u8 prof_id = 0; u16 i, buf_sz; + int status; status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id); if (status) @@ -2642,7 +2939,7 @@ ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof, if (status) return status; - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; e->acts = devm_kmemdup(ice_hw_to_dev(hw), acts, acts_cnt * sizeof(*acts), GFP_KERNEL); @@ -2679,10 +2976,10 @@ ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof, for (i = 0; i < prof->segs_cnt; i++) { struct ice_flow_seg_info *seg = &prof->segs[i]; - u8 j; + u16 j; for_each_set_bit(j, (unsigned long *)&seg->match, - ICE_FLOW_FIELD_IDX_MAX) { + (u16)ICE_FLOW_FIELD_IDX_MAX) { struct ice_flow_fld_info *info = &seg->fields[j]; if (info->type == ICE_FLOW_FLD_TYPE_RANGE) @@ -2723,7 +3020,6 @@ ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof, /* Format the buffer for direction flags */ dir_flag_msk = BIT(ICE_FLG_PKT_DIR); - if (prof->dir == ICE_FLOW_RX) buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk; @@ -2888,14 +3184,14 @@ ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p) * For this function, we do the union between dst_buf and src_buf * range checker buffer, and we will save the result back to dst_buf */ -static enum ice_status +static int ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf, struct ice_aqc_acl_profile_ranges *src_buf) { u8 i, j; if (!dst_buf || !src_buf) - return ICE_ERR_BAD_PTR; + return -EINVAL; for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) { struct ice_acl_rng_data *cfg_data = NULL, *in_data; @@ -2922,7 +3218,7 @@ ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf, sizeof(struct ice_acl_rng_data)); } else { /* No available slot left to program range checker */ - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; } } @@ -2939,7 +3235,7 @@ ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf, * corresponding ACL scenario. Then, we will perform matching logic to * see if we want to add/modify/do nothing with this new entry. 
*/ -static enum ice_status +static int ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof, struct ice_flow_entry **entry) { @@ -2947,12 +3243,12 @@ ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof, struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf; struct ice_acl_act_entry *acts = NULL; struct ice_flow_entry *exist; - enum ice_status status = 0; struct ice_flow_entry *e; + int status = 0; u8 i; if (!entry || !(*entry) || !prof) - return ICE_ERR_BAD_PTR; + return -EINVAL; e = *entry; @@ -3006,7 +3302,7 @@ ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof, acts = devm_kcalloc(ice_hw_to_dev(hw), e->entry_sz, sizeof(struct ice_acl_act_entry), GFP_KERNEL); if (!acts) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < e->acts_cnt; i++) memcpy(&acts[i], &e->acts[i].data.acl_act, @@ -3041,7 +3337,7 @@ ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof, sizeof(struct ice_flow_action), GFP_KERNEL); if (!exist->acts) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto out; } @@ -3083,11 +3379,11 @@ out: * @prof: pointer to flow profile * @e: double pointer to the flow entry */ -static enum ice_status +static int ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof, struct ice_flow_entry **e) { - enum ice_status status; + int status; mutex_lock(&prof->entries_lock); status = ice_flow_acl_add_scen_entry_sync(hw, prof, e); @@ -3109,7 +3405,7 @@ ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof, * @acts_cnt: number of actions * @entry_h: pointer to buffer that receives the new flow entry's handle */ -enum ice_status +int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio, void *data, struct ice_flow_action *acts, u8 acts_cnt, @@ -3117,32 +3413,31 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, { struct ice_flow_entry *e = NULL; struct ice_flow_prof *prof; - enum ice_status status = 0; + int status = 0; /* ACL entries must indicate an action */ if (blk == ICE_BLK_ACL && (!acts || !acts_cnt)) - return ICE_ERR_PARAM; - + return -EINVAL; /* No flow entry data is expected for RSS */ if (!entry_h || (!data && blk != ICE_BLK_RSS)) - return ICE_ERR_BAD_PTR; + return -EINVAL; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->fl_profs_locks[blk]); prof = ice_flow_find_prof_id(hw, blk, prof_id); if (!prof) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; } else { /* Allocate memory for the entry being added and associate * the VSI to the found flow profile */ e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL); if (!e) - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; else status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); } @@ -3163,7 +3458,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, case ICE_BLK_ACL: /* ACL will handle the entry management */ if (ice_dcf_is_acl_capable(hw)) { - status = ICE_ERR_IN_USE; + status = -EBUSY; goto out; } status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts, @@ -3177,7 +3472,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, break; default: - status = ICE_ERR_NOT_IMPL; + status = -EOPNOTSUPP; goto out; } @@ -3206,15 +3501,15 @@ out: * @blk: classification stage * @entry_h: handle to the flow entry to be removed */ -enum ice_status ice_flow_rem_entry(struct ice_hw 
*hw, enum ice_block blk, +int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h) { struct ice_flow_entry *entry; struct ice_flow_prof *prof; - enum ice_status status = 0; + int status = 0; if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL) - return ICE_ERR_PARAM; + return -EINVAL; entry = ICE_FLOW_ENTRY_PTR(entry_h); @@ -3377,21 +3672,21 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, * This function removes the flow entries associated to the input * vsi handle and disassociates the vsi from the flow profile. */ -enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle, +int ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle, u64 prof_id) { struct ice_flow_prof *prof = NULL; - enum ice_status status = 0; + int status = 0; if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; /* find flow profile pointer with input package block and profile id */ prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id); if (!prof) { ice_debug(hw, ICE_DBG_PKG, "Cannot find flow profile id=%llu\n", prof_id); - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /* Remove all remaining flow entries before removing the flow profile */ @@ -3445,19 +3740,19 @@ enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 * header value to set flow field segment for further use in flow * profile entry or removal. */ -static enum ice_status +static int ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt, const struct ice_rss_hash_cfg *cfg) { struct ice_flow_seg_info *seg; u64 val; - u8 i; + u16 i; /* set inner most segment */ seg = &segs[seg_cnt - 1]; for_each_set_bit(i, (const unsigned long *)&cfg->hash_flds, - ICE_FLOW_FIELD_IDX_MAX) + (u16)ICE_FLOW_FIELD_IDX_MAX) ice_flow_set_fld(seg, (enum ice_flow_field)i, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false); @@ -3473,19 +3768,27 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt, segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_FRAG | ICE_FLOW_SEG_HDR_IPV_OTHER; + else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE) + segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_GRE | + ICE_FLOW_SEG_HDR_IPV_OTHER; + else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE) + segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_GRE | + ICE_FLOW_SEG_HDR_IPV_OTHER; if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER & ~ICE_FLOW_SEG_HDR_IPV_FRAG) - return ICE_ERR_PARAM; + return -EINVAL; val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); if (val && !is_power_of_2(val)) - return ICE_ERR_CFG; + return -EIO; val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); if (val && !is_power_of_2(val)) - return ICE_ERR_CFG; + return -EIO; return 0; } @@ -3523,14 +3826,14 @@ void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle) * the VSI from that profile. If the flow profile has no VSIs it will * be removed. 
*/ -enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) +int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *p, *t; - enum ice_status status = 0; + int status = 0; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; if (list_empty(&hw->fl_profs[blk])) return 0; @@ -3616,7 +3919,7 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) * * Assumption: lock has already been acquired for RSS list */ -static enum ice_status +static int ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { enum ice_rss_cfg_hdr_type hdr_type; @@ -3634,7 +3937,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg), GFP_KERNEL); if (!rss_cfg) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match; rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs; @@ -3773,6 +4076,119 @@ ice_rss_update_symm(struct ice_hw *hw, } } +/** + * ice_rss_cfg_raw_symm - configure symmetric hash parameters + * for raw pattern + * @hw: pointer to the hardware structure + * @prof: pointer to parser profile + * @prof_id: profile ID + * + * Calculate symmetric hash parameters based on input protocol type. + */ +static void +ice_rss_cfg_raw_symm(struct ice_hw *hw, + struct ice_parser_profile *prof, u64 prof_id) +{ + u8 src_idx, dst_idx, proto_id; + int len, i = 0; + + while (i < prof->fv_num) { + proto_id = prof->fv[i].proto_id; + + switch (proto_id) { + case ICE_PROT_IPV4_OF_OR_S: + case ICE_PROT_IPV4_IL: + case ICE_PROT_IPV4_IL_IL: + len = ICE_FLOW_FLD_SZ_IPV4_ADDR / + ICE_FLOW_FV_EXTRACT_SZ; + if (prof->fv[i].offset == + ICE_FLOW_FIELD_IPV4_SRC_OFFSET && + prof->fv[i + len].proto_id == proto_id && + prof->fv[i + len].offset == + ICE_FLOW_FIELD_IPV4_DST_OFFSET) { + src_idx = i; + dst_idx = i + len; + i += 2 * len; + break; + } + i++; + continue; + case ICE_PROT_IPV6_OF_OR_S: + case ICE_PROT_IPV6_IL: + case ICE_PROT_IPV6_IL_IL: + len = ICE_FLOW_FLD_SZ_IPV6_ADDR / + ICE_FLOW_FV_EXTRACT_SZ; + if (prof->fv[i].offset == + ICE_FLOW_FIELD_IPV6_SRC_OFFSET && + prof->fv[i + len].proto_id == proto_id && + prof->fv[i + len].offset == + ICE_FLOW_FIELD_IPV6_DST_OFFSET) { + src_idx = i; + dst_idx = i + len; + i += 2 * len; + break; + } + i++; + continue; + case ICE_PROT_TCP_IL: + case ICE_PROT_UDP_IL_OR_S: + case ICE_PROT_SCTP_IL: + len = ICE_FLOW_FLD_SZ_PORT / + ICE_FLOW_FV_EXTRACT_SZ; + if (prof->fv[i].offset == + ICE_FLOW_FIELD_SRC_PORT_OFFSET && + prof->fv[i + len].proto_id == proto_id && + prof->fv[i + len].offset == + ICE_FLOW_FIELD_DST_PORT_OFFSET) { + src_idx = i; + dst_idx = i + len; + i += 2 * len; + break; + } + i++; + continue; + default: + i++; + continue; + } + ice_rss_config_xor(hw, prof_id, src_idx, dst_idx, len); + } +} + +/* Max registers index per packet profile */ +#define ICE_SYMM_REG_INDEX_MAX 6 + +/** + * ice_rss_update_raw_symm - update symmetric hash configuration + * for raw pattern + * @hw: pointer to the hardware structure + * @cfg: configure parameters for raw pattern + * @id: profile tracking ID + * + * Update symmetric hash configuration for raw pattern if required. + * Otherwise only clear to default. 
+ */ +void +ice_rss_update_raw_symm(struct ice_hw *hw, + struct ice_rss_raw_cfg *cfg, u64 id) +{ + struct ice_prof_map *map; + u8 prof_id, m; + + mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); + map = ice_search_prof_id(hw, ICE_BLK_RSS, id); + if (map) + prof_id = map->prof_id; + mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); + if (!map) + return; + /* clear to default */ + for (m = 0; m < ICE_SYMM_REG_INDEX_MAX; m++) + wr32(hw, GLQF_HSYMM(prof_id, m), 0); + if (cfg->symm) + ice_rss_cfg_raw_symm(hw, &cfg->prof, prof_id); +} + /** * ice_add_rss_cfg_sync - add an RSS configuration * @hw: pointer to the hardware structure @@ -3781,16 +4197,15 @@ ice_rss_update_symm(struct ice_hw *hw, * * Assumption: lock has already been acquired for RSS list */ -static enum ice_status +static int ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *prof = NULL; struct ice_flow_seg_info *segs; - enum ice_status status; u8 segs_cnt; - + int status; segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ? ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX; @@ -3798,7 +4213,7 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, segs = devm_kcalloc(ice_hw_to_dev(hw), segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Construct the packet segment info from the hashed fields */ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg); @@ -3859,7 +4274,7 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, /* if a profile exist but with different symmetric * requirement, just return error. */ - status = ICE_ERR_NOT_SUPPORTED; + status = -EOPNOTSUPP; } goto exit; } @@ -3905,17 +4320,17 @@ exit: * the input fields to hash on, the flow type and use the VSI number to add * a flow entry to the profile. */ -enum ice_status +int ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg) { struct ice_rss_hash_cfg local_cfg; - enum ice_status status; + int status; if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS || cfg->hash_flds == ICE_HASH_INVALID) - return ICE_ERR_PARAM; + return -EINVAL; local_cfg = *cfg; if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) { @@ -3945,22 +4360,22 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, * * Assumption: lock has already been acquired for RSS list */ -static enum ice_status +static int ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_seg_info *segs; struct ice_flow_prof *prof; - enum ice_status status; u8 segs_cnt; + int status; segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ? ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX; segs = devm_kcalloc(ice_hw_to_dev(hw), segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Construct the packet segment info from the hashed fields */ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg); @@ -3971,7 +4386,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS); if (!prof) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto out; } @@ -4004,17 +4419,17 @@ out: * removed. Calls are made to underlying flow apis which will in * turn build or update buffers for RSS XLT1 section. 
*/ -enum ice_status +int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg) { struct ice_rss_hash_cfg local_cfg; - enum ice_status status; + int status; if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS || cfg->hash_flds == ICE_HASH_INVALID) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->rss_locks); local_cfg = *cfg; @@ -4077,21 +4492,21 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, * message, convert it to ICE-compatible values, and configure RSS flow * profiles. */ -enum ice_status +int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) { - enum ice_status status = 0; struct ice_rss_hash_cfg hcfg; + int status = 0; u64 hash_flds; if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; /* Make sure no unsupported bits are specified */ if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS | ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)) - return ICE_ERR_CFG; + return -EIO; hash_flds = avf_hash; @@ -4151,7 +4566,7 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) } if (rss_hash == ICE_HASH_INVALID) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE; hcfg.hash_flds = rss_hash; @@ -4170,13 +4585,13 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle */ -enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) { - enum ice_status status = 0; struct ice_rss_cfg *r; + int status = 0; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->rss_locks); list_for_each_entry(r, &hw->rss_list_head, l_entry) { diff --git a/drivers/thirdparty/ice/ice_flow.h b/drivers/thirdparty/ice/ice_flow.h index d41fcfff3bd3..ca299adac179 100644 --- a/drivers/thirdparty/ice/ice_flow.h +++ b/drivers/thirdparty/ice/ice_flow.h @@ -148,6 +148,23 @@ #define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI) +#define ICE_FLOW_HASH_L2TPV2_SESS_ID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID)) +#define ICE_FLOW_HASH_L2TPV2_SESS_ID_ETH \ + (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_SESS_ID) + +#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID)) +#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID_ETH \ + (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID) + +#define ICE_FLOW_FIELD_IPV4_SRC_OFFSET 12 +#define ICE_FLOW_FIELD_IPV4_DST_OFFSET 16 +#define ICE_FLOW_FIELD_IPV6_SRC_OFFSET 8 +#define ICE_FLOW_FIELD_IPV6_DST_OFFSET 24 +#define ICE_FLOW_FIELD_SRC_PORT_OFFSET 0 +#define ICE_FLOW_FIELD_DST_PORT_OFFSET 2 + /* Protocol header fields within a packet segment. A segment consists of one or * more protocol headers that make up a logical group of protocol headers. 
Each * logical group of protocol headers encapsulates or is encapsulated using/by @@ -227,6 +244,7 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_IPV4_DA, ICE_FLOW_FIELD_IDX_IPV6_SA, ICE_FLOW_FIELD_IDX_IPV6_DA, + ICE_FLOW_FIELD_IDX_IPV4_CHKSUM, ICE_FLOW_FIELD_IDX_IPV4_ID, ICE_FLOW_FIELD_IDX_IPV6_ID, ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA, @@ -243,6 +261,9 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_FLAGS, + ICE_FLOW_FIELD_IDX_TCP_CHKSUM, + ICE_FLOW_FIELD_IDX_UDP_CHKSUM, + ICE_FLOW_FIELD_IDX_SCTP_CHKSUM, /* ARP */ ICE_FLOW_FIELD_IDX_ARP_SIP, ICE_FLOW_FIELD_IDX_ARP_DIP, @@ -263,8 +284,10 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, /* GTPU_UP */ ICE_FLOW_FIELD_IDX_GTPU_UP_TEID, + ICE_FLOW_FIELD_IDX_GTPU_UP_QFI, /* GTPU_DWN */ ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID, + ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI, /* PPPOE */ ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID, /* PFCP */ @@ -283,11 +306,14 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID, /* UDP_ECPRI_TP0 */ ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID, + /* L2TPV2 SESSION ID*/ + ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID, + /* L2TPV2_LEN SESSION ID */ + ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID, /* The total number of enums must not exceed 64 */ ICE_FLOW_FIELD_IDX_MAX }; - /* Flow headers and fields for AVF support */ enum ice_flow_avf_hdr_field { /* Values 0 - 28 are reserved for future use */ @@ -348,6 +374,10 @@ enum ice_rss_cfg_hdr_type { /* take inner headers as inputset for packet with outer ipv6. */ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6, /* take outer headers first then inner headers as inputset */ + /* take inner as inputset for GTPoGRE with outer ipv4 + gre. */ + ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE, + /* take inner as inputset for GTPoGRE with outer ipv6 + gre. 
*/ + ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE, ICE_RSS_ANY_HEADERS }; @@ -389,7 +419,6 @@ struct ice_flow_seg_xtrct { u16 mask; /* Mask for field */ }; - enum ice_flow_fld_match_type { ICE_FLOW_FLD_TYPE_REG, /* Value, mask */ ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */ @@ -452,8 +481,8 @@ struct ice_flow_entry { u8 acts_cnt; }; -#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e) -#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h)) +#define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)(e)) +#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(uintptr_t)(h)) struct ice_flow_prof { struct list_head l_entry; @@ -484,11 +513,18 @@ struct ice_flow_prof { struct ice_flow_action *acts; }; +struct ice_rss_raw_cfg { + struct ice_parser_profile prof; + bool raw_ena; + bool symm; +}; + struct ice_rss_cfg { struct list_head l_entry; /* bitmap of VSIs added to the RSS entry */ DECLARE_BITMAP(vsis, ICE_MAX_VSI); struct ice_rss_hash_cfg hash; + struct ice_rss_raw_cfg raw; }; enum ice_flow_action_type { @@ -525,26 +561,30 @@ struct ice_flow_action { u64 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, struct ice_flow_seg_info *segs, u8 segs_cnt); -enum ice_status +int ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_action *acts, u8 acts_cnt, struct ice_flow_prof **prof); -enum ice_status +int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); -enum ice_status +int +ice_flow_set_hw_prof(struct ice_hw *hw, u16 dest_vsi_handle, + u16 fdir_vsi_handle, struct ice_parser_profile *prof, + enum ice_block blk); +int ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle, u16 vsig); -enum ice_status +int ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u8 *hw_prof); u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id); -enum ice_status +int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u64 entry_id, u16 vsi, enum ice_flow_priority prio, void *data, struct ice_flow_action *acts, u8 acts_cnt, u64 *entry_h); -enum ice_status +int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h); void ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, @@ -555,18 +595,20 @@ ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld, void ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, u16 val_loc, u16 mask_loc); -enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, +int ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle, u64 prof_id); void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle); -enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -enum ice_status +int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); +int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds); -enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -enum ice_status +int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); +int ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg); -enum ice_status +int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg); +void ice_rss_update_raw_symm(struct ice_hw *hw, + struct ice_rss_raw_cfg *cfg, u64 id); u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs); #endif /* _ICE_FLOW_H_ */ 
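The ICE_FLOW_ENTRY_HNDL()/ICE_FLOW_ENTRY_PTR() change above routes the pointer-to-u64 conversion through uintptr_t, so the cast stays well defined (and warning-free) on builds where pointers are narrower than 64 bits. A minimal standalone sketch of the same round-trip — outside the driver, with hypothetical names, not the driver's own code — looks like this:

#include <stdint.h>
#include <stdio.h>

struct flow_entry {
	int id;
};

static uint64_t entry_to_handle(struct flow_entry *e)
{
	/* mirrors ICE_FLOW_ENTRY_HNDL(e): pointer -> uintptr_t -> u64 */
	return (uint64_t)(uintptr_t)e;
}

static struct flow_entry *handle_to_entry(uint64_t h)
{
	/* mirrors ICE_FLOW_ENTRY_PTR(h): u64 -> uintptr_t -> pointer */
	return (struct flow_entry *)(uintptr_t)h;
}

int main(void)
{
	struct flow_entry e = { .id = 42 };
	uint64_t handle = entry_to_handle(&e);

	printf("recovered id = %d\n", handle_to_entry(handle)->id);
	return 0;
}

In the header above, the same u64 handle is what ice_flow_add_entry() returns through *entry_h and what ice_flow_rem_entry() later takes back, so the pointer survives the trip through the opaque handle unchanged.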
diff --git a/drivers/thirdparty/ice/ice_fltr.c b/drivers/thirdparty/ice/ice_fltr.c index 3acf5656479f..8753806fda64 100644 --- a/drivers/thirdparty/ice/ice_fltr.c +++ b/drivers/thirdparty/ice/ice_fltr.c @@ -54,10 +54,18 @@ ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info, * * Set VSI with all associated VLANs to given promiscuous mode(s) */ -enum ice_status +int ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask) { - return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false); + struct ice_pf *pf = (struct ice_pf *)hw->back; + int result; + + result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false); + if (result && result != -EEXIST) + dev_err(ice_pf_to_dev(pf), + "Error setting promisc mode on VSI %i (rc=%d\n", + vsi->vsi_num, result); + return result; } /** @@ -68,10 +76,18 @@ ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc * * Clear VSI with all associated VLANs to given promiscuous mode(s) */ -enum ice_status +int ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask) { - return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true); + struct ice_pf *pf = (struct ice_pf *)hw->back; + int result; + + result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true); + if (result && result != -EEXIST) + dev_err(ice_pf_to_dev(pf), + "Error setting promisc mode on VSI %i (rc=%d\n", + vsi->vsi_num, result); + return result; } /** @@ -82,11 +98,19 @@ ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promi * @vid: VLAN ID to clear VLAN promiscuous * @lport: logical port number to clear mode */ -enum ice_status +int ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid, u8 lport) { - return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid); + struct ice_pf *pf = (struct ice_pf *)hw->back; + int result; + + result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid); + if (result && result != -EEXIST) + dev_err(ice_pf_to_dev(pf), + "Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n", + ice_get_hw_vsi_num(hw, vsi_handle), vid, result); + return result; } /** @@ -97,11 +121,19 @@ ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, * @vid: VLAN ID to set VLAN promiscuous * @lport: logical port number to set promiscuous mode */ -enum ice_status +int ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid, u8 lport) { - return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid); + struct ice_pf *pf = (struct ice_pf *)hw->back; + int result; + + result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid); + if (result && result != -EEXIST) + dev_err(ice_pf_to_dev(pf), + "Error setting promisc mode on VSI %i for VID %u (rc=%d)\n", + ice_get_hw_vsi_num(hw, vsi_handle), vid, result); + return result; } /** @@ -109,8 +141,7 @@ ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, * @vsi: pointer to VSI struct * @list: list of filters */ -enum ice_status -ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) +int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_mac(&vsi->back->hw, list); } @@ -120,8 +151,7 @@ ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -enum ice_status -ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) +int 
ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_mac(&vsi->back->hw, list); } @@ -131,8 +161,7 @@ ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status -ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list) +static int ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_vlan(&vsi->back->hw, list); } @@ -142,7 +171,7 @@ ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status +static int ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_vlan(&vsi->back->hw, list); @@ -153,7 +182,7 @@ ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status +static int ice_fltr_add_mac_vlan_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_mac_vlan(&vsi->back->hw, list); @@ -164,7 +193,7 @@ ice_fltr_add_mac_vlan_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status +static int ice_fltr_remove_mac_vlan_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_mac_vlan(&vsi->back->hw, list); @@ -175,8 +204,7 @@ ice_fltr_remove_mac_vlan_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status -ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) +static int ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_eth_mac(&vsi->back->hw, list); } @@ -186,8 +214,7 @@ ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status -ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list) +static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_eth_mac(&vsi->back->hw, list); } @@ -321,18 +348,17 @@ ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list, * @action: action to be performed on filter match * @mac_action: pointer to add or remove MAC function */ -static enum ice_status +static int ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action, - enum ice_status (*mac_action)(struct ice_vsi *, - struct list_head *)) + int (*mac_action)(struct ice_vsi *, struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) { ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } result = mac_action(vsi, &tmp_list); @@ -347,21 +373,21 @@ ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac, * @action: action to be performed on filter match * @mac_action: pointer to add or remove MAC function */ -static enum ice_status +static int ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action, - enum ice_status(*mac_action) + int(*mac_action) (struct ice_vsi *, struct list_head *)) { u8 broadcast[ETH_ALEN]; - enum ice_status result; LIST_HEAD(tmp_list); + int result; eth_broadcast_addr(broadcast); if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) || ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) { 
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } result = mac_action(vsi, &tmp_list); @@ -375,16 +401,15 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, * @vlan: VLAN filter details * @vlan_action: pointer to add or remove VLAN function */ -static enum ice_status +static int ice_fltr_prepare_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan, - enum ice_status (*vlan_action)(struct ice_vsi *, - struct list_head *)) + int (*vlan_action)(struct ice_vsi *, struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan)) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; result = vlan_action(vsi, &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); @@ -399,18 +424,18 @@ ice_fltr_prepare_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan, * @action: action to be performed on filter match * @mac_vlan_action: pointer to add or remove MAC VLAN function */ -static enum ice_status +static int ice_fltr_prepare_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id, enum ice_sw_fwd_act_type action, - enum ice_status (mac_vlan_action)(struct ice_vsi *, - struct list_head *)) + int (mac_vlan_action)(struct ice_vsi *, + struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_mac_vlan_to_list(vsi, &tmp_list, mac, vlan_id, action)) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; result = mac_vlan_action(vsi, &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); @@ -425,17 +450,16 @@ ice_fltr_prepare_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id, * @action: action to be performed on filter match * @eth_action: pointer to add or remove ethertype function */ -static enum ice_status +static int ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action, - enum ice_status (*eth_action)(struct ice_vsi *, - struct list_head *)) + int (*eth_action)(struct ice_vsi *, struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action)) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; result = eth_action(vsi, &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); @@ -448,8 +472,9 @@ ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, * @mac: MAC to add * @action: action to be performed on filter match */ -enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, - enum ice_sw_fwd_act_type action) +int +ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list); } @@ -460,7 +485,7 @@ enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, * @mac: MAC to add * @action: action to be performed on filter match */ -enum ice_status +int ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action) { @@ -474,8 +499,9 @@ ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, * @mac: filter MAC to remove * @action: action to remove */ -enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, - enum ice_sw_fwd_act_type action) +int +ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list); } @@ -485,7 +511,7 @@ enum ice_status ice_fltr_remove_mac(struct ice_vsi 
*vsi, const u8 *mac, * @vsi: pointer to VSI struct * @vlan: VLAN filter details */ -enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) +int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) { return ice_fltr_prepare_vlan(vsi, vlan, ice_fltr_add_vlan_list); } @@ -495,7 +521,7 @@ enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) * @vsi: pointer to VSI struct * @vlan: VLAN filter details */ -enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) +int ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) { return ice_fltr_prepare_vlan(vsi, vlan, ice_fltr_remove_vlan_list); } @@ -507,7 +533,7 @@ enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) * @vlan_id: VLAN ID to add * @action: action to be performed on filter match */ -enum ice_status +int ice_fltr_add_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id, enum ice_sw_fwd_act_type action) { @@ -522,7 +548,7 @@ ice_fltr_add_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id, * @vlan_id: filter MAC VLAN to remove * @action: action to remove */ -enum ice_status +int ice_fltr_remove_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id, enum ice_sw_fwd_act_type action) { @@ -537,8 +563,9 @@ ice_fltr_remove_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id, * @flag: direction of packet to be filtered, Tx or Rx * @action: action to be performed on filter match */ -enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, - enum ice_sw_fwd_act_type action) +int +ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_eth(vsi, ethertype, flag, action, ice_fltr_add_eth_list); @@ -551,217 +578,11 @@ enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, * @flag: direction of filter * @action: action to remove */ -enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, - u16 flag, enum ice_sw_fwd_act_type action) +int +ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_eth(vsi, ethertype, flag, action, ice_fltr_remove_eth_list); } -/** - * ice_fltr_update_rule_flags - update lan_en/lb_en flags - * @hw: pointer to hw - * @rule_id: id of rule being updated - * @recipe_id: recipe id of rule - * @act: current action field - * @type: Rx or Tx - * @src: source VSI - * @new_flags: combinations of lb_en and lan_en - */ -static enum ice_status -ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id, - u32 act, u16 type, u16 src, u32 new_flags) -{ - struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status err; - u32 flags_mask; - - s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE; - act &= ~flags_mask; - act |= (flags_mask & new_flags); - - s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id); - s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id); - s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); - - if (type & ICE_FLTR_RX) { - s_rule->pdata.lkup_tx_rx.src = - cpu_to_le16(hw->port_info->lport); - s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); - - } else { - s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src); - s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); - } - - err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, - 
ice_aqc_opc_update_sw_rules, NULL); - - kfree(s_rule); - return err; -} - -/** - * ice_fltr_build_action - build action for rule - * @vsi_id: id of VSI which is use to build action - */ -static u32 -ice_fltr_build_action(u16 vsi_id) -{ - return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) | - ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; -} - -/** - * ice_fltr_find_adv_entry - find advanced rule - * @rules: list of rules - * @rule_id: id of wanted rule - */ -static struct ice_adv_fltr_mgmt_list_entry * -ice_fltr_find_adv_entry(struct list_head *rules, u16 rule_id) -{ - struct ice_adv_fltr_mgmt_list_entry *entry; - - list_for_each_entry(entry, rules, list_entry) { - if (entry->rule_info.fltr_rule_id == rule_id) - return entry; - } - - return NULL; -} - -/** - * ice_fltr_update_adv_rule_flags - update flags on advanced rule - * @vsi: pointer to VSI - * @recipe_id: id of recipe - * @entry: advanced rule entry - * @new_flags: flags to update - */ -static enum ice_status -ice_fltr_update_adv_rule_flags(struct ice_vsi *vsi, u16 recipe_id, - struct ice_adv_fltr_mgmt_list_entry *entry, - u32 new_flags) -{ - struct ice_adv_rule_info *info = &entry->rule_info; - struct ice_sw_act_ctrl *act = &info->sw_act; - u32 action; - - if (act->fltr_act != ICE_FWD_TO_VSI) - return ICE_ERR_NOT_SUPPORTED; - - action = ice_fltr_build_action(act->fwd_id.hw_vsi_id); - - return ice_fltr_update_rule_flags(&vsi->back->hw, info->fltr_rule_id, - recipe_id, action, info->sw_act.flag, - act->src, new_flags); -} - -/** - * ice_fltr_find_regular_entry - find regular rule - * @rules: list of rules - * @rule_id: id of wanted rule - */ -static struct ice_fltr_mgmt_list_entry * -ice_fltr_find_regular_entry(struct list_head *rules, u16 rule_id) -{ - struct ice_fltr_mgmt_list_entry *entry; - - list_for_each_entry(entry, rules, list_entry) { - if (entry->fltr_info.fltr_rule_id == rule_id) - return entry; - } - - return NULL; -} - -/** - * ice_fltr_update_regular_rule - update flags on regular rule - * @vsi: pointer to VSI - * @recipe_id: id of recipe - * @entry: regular rule entry - * @new_flags: flags to update - */ -static enum ice_status -ice_fltr_update_regular_rule(struct ice_vsi *vsi, u16 recipe_id, - struct ice_fltr_mgmt_list_entry *entry, - u32 new_flags) -{ - struct ice_fltr_info *info = &entry->fltr_info; - u32 action; - - if (info->fltr_act != ICE_FWD_TO_VSI) - return ICE_ERR_NOT_SUPPORTED; - - action = ice_fltr_build_action(info->fwd_id.hw_vsi_id); - - return ice_fltr_update_rule_flags(&vsi->back->hw, info->fltr_rule_id, - recipe_id, action, info->flag, - info->src, new_flags); -} - -/** - * ice_fltr_update_flags - update flags on rule - * @vsi: pointer to VSI - * @rule_id: id of rule - * @recipe_id: id of recipe - * @new_flags: flags to update - * - * Function updates flags on regular and advance rule. - * - * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and - * ICE_SINGLE_ACT_LAN_ENABLE. 
- */ -enum ice_status -ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id, - u32 new_flags) -{ - struct ice_adv_fltr_mgmt_list_entry *adv_entry; - struct ice_fltr_mgmt_list_entry *regular_entry; - struct ice_hw *hw = &vsi->back->hw; - struct ice_sw_recipe *recp_list; - struct list_head *fltr_rules; - - recp_list = &hw->switch_info->recp_list[recipe_id]; - if (!recp_list) - return ICE_ERR_DOES_NOT_EXIST; - - fltr_rules = &recp_list->filt_rules; - regular_entry = ice_fltr_find_regular_entry(fltr_rules, rule_id); - if (regular_entry) - return ice_fltr_update_regular_rule(vsi, recipe_id, - regular_entry, new_flags); - - adv_entry = ice_fltr_find_adv_entry(fltr_rules, rule_id); - if (adv_entry) - return ice_fltr_update_adv_rule_flags(vsi, recipe_id, - adv_entry, new_flags); - - return ICE_ERR_DOES_NOT_EXIST; -} - -/** - * ice_fltr_update_flags_dflt_rule - update flags on default rule - * @vsi: pointer to VSI - * @rule_id: id of rule - * @direction: Tx or Rx - * @new_flags: flags to update - * - * Function updates flags on default rule with ICE_SW_LKUP_DFLT. - * - * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and - * ICE_SINGLE_ACT_LAN_ENABLE. - */ -enum ice_status -ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, - u32 new_flags) -{ - u32 action = ice_fltr_build_action(vsi->vsi_num); - struct ice_hw *hw = &vsi->back->hw; - - return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action, - direction, vsi->vsi_num, new_flags); -} diff --git a/drivers/thirdparty/ice/ice_fltr.h b/drivers/thirdparty/ice/ice_fltr.h index 20723d0b88e3..798f3b38a937 100644 --- a/drivers/thirdparty/ice/ice_fltr.h +++ b/drivers/thirdparty/ice/ice_fltr.h @@ -7,54 +7,47 @@ #include "ice_vlan.h" void ice_fltr_free_list(struct device *dev, struct list_head *h); -enum ice_status -ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask); -enum ice_status -ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask); -enum ice_status +int +ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask); +int +ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask); +int ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid, u8 lport); -enum ice_status +int ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid, u8 lport); int ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status -ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list); -enum ice_status +int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list); +int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status -ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list); +int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list); -enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); -enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); -enum ice_status +int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); +int ice_fltr_remove_vlan(struct 
ice_vsi *vsi, struct ice_vlan *vlan); +int ice_fltr_add_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_remove_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action); void ice_fltr_remove_all(struct ice_vsi *vsi); - -enum ice_status -ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id, - u32 new_flags); -enum ice_status -ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, - u32 new_flags); #endif diff --git a/drivers/thirdparty/ice/ice_fw_update.c b/drivers/thirdparty/ice/ice_fw_update.c index 8cb0d88351fb..413de6bee58c 100644 --- a/drivers/thirdparty/ice/ice_fw_update.c +++ b/drivers/thirdparty/ice/ice_fw_update.c @@ -21,6 +21,18 @@ struct ice_fwu_priv { /* Track which NVM banks to activate at the end of the update */ u8 activate_flags; + + /* Track the firmware response of the required reset to complete the + * flash update. + * + * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required + * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required + * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required + */ + u8 reset_level; + + /* Track if EMP reset is available */ + u8 emp_reset_available; }; /** @@ -45,8 +57,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length) struct device *dev = context->dev; struct ice_pf *pf = priv->pf; struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 *package_data; + int status; dev_dbg(dev, "Sending PLDM record package data to firmware\n"); @@ -59,9 +71,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length) kfree(package_data); if (status) { - dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware"); return -EIO; } @@ -209,8 +220,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon struct device *dev = context->dev; struct ice_pf *pf = priv->pf; struct ice_hw *hw = &pf->hw; - enum ice_status status; size_t length; + int status; switch (component->identifier) { case NVM_COMP_ID_OROM: @@ -246,9 +257,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon kfree(comp_tbl); if (status) { - dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware"); return -EIO; } @@ -265,6 +275,7 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon * @block_size: size of the block to write, up to 4k * @block: pointer to block of data to write * @last_cmd: whether this is the last command + * @reset_level: storage for reset level required * @extack: netlink extended ACK structure * * Write a block of data to a flash module, and await for the completion 
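The new reset_level field cached in struct ice_fwu_priv (hunk above) records which reset the firmware says is needed to finish activating the flash update; later in this patch ice_finalize_update() turns that value into a devlink status message. A standalone sketch of that decode — with the numeric values taken from the comment above and illustrative names in place of the driver's ICE_AQC_NVM_* macros — is:

#include <stdio.h>

/* Values mirror the comment in struct ice_fwu_priv above:
 * 0 - power-on reset, 1 - cold PCIe reset, 2 - EMP reset.
 */
enum fwu_reset_level {
	FWU_RESET_POR   = 0,
	FWU_RESET_PERST = 1,
	FWU_RESET_EMPR  = 2,
};

static const char *fwu_reset_action(enum fwu_reset_level lvl)
{
	switch (lvl) {
	case FWU_RESET_EMPR:
		return "Activate new firmware by devlink reload";
	case FWU_RESET_PERST:
		return "Activate new firmware by rebooting the system";
	case FWU_RESET_POR:
	default:
		return "Activate new firmware by power cycling the system";
	}
}

int main(void)
{
	printf("%s\n", fwu_reset_action(FWU_RESET_EMPR));
	return 0;
}

Note that later in this patch, when firmware reports an EMP reset but EMP reset is disabled, ice_finalize_update() downgrades reset_level to the PCIe-reset case before this kind of decode runs, so the user is told to reboot instead.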
@@ -272,18 +283,23 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon * * Note this function assumes the caller has acquired the NVM resource. * + * On successful return, reset level indicates the device reset required to + * complete the update. + * + * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required + * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required + * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required + * * Returns: zero on success, or a negative error code on failure. */ -static int -ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, - u16 block_size, u8 *block, bool last_cmd, - struct netlink_ext_ack *extack) +int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, + u16 block_size, u8 *block, bool last_cmd, + u8 *reset_level, struct netlink_ext_ack *extack) { u16 completion_module, completion_retval; struct device *dev = ice_pf_to_dev(pf); struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; - enum ice_status status; u32 completion_offset; int err; @@ -292,11 +308,11 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n", block_size, module, offset); - status = ice_aq_update_nvm(hw, module, offset, block_size, block, - last_cmd, 0, NULL); - if (status) { - dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n", - module, block_size, offset, ice_stat_str(status), + err = ice_aq_update_nvm(hw, module, offset, block_size, block, + last_cmd, 0, NULL); + if (err) { + dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n", + module, block_size, offset, err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module"); return -EIO; @@ -310,8 +326,9 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, */ err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15*HZ, &event); if (err) { - dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n", - module, block_size, offset, err); + ice_dev_err_errno(dev, err, + "Timed out while trying to flash module 0x%02x with block of size %u at offset %u", + module, block_size, offset); NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware"); return -EIO; } @@ -337,13 +354,31 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, } if (completion_retval) { - dev_err(dev, "Firmware ailed to flash module 0x%02x with block of size %u at offset %u, err %s\n", + dev_err(dev, "Firmware failed to flash module 0x%02x with block of size %u at offset %u, err %s\n", module, block_size, offset, ice_aq_str((enum ice_aq_err)completion_retval)); NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module"); return -EIO; } + /* For the last command to write the NVM bank, newer versions of + * firmware indicate the required level of reset to complete + * activation of firmware. If the firmware supports this, cache the + * response for indicating to the user later. Otherwise, assume that + * a full power cycle is required. 
+ */ + if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) { + if (hw->dev_caps.common_cap.pcie_reset_avoidance) { + *reset_level = (event.desc.params.nvm.cmd_flags & + ICE_AQC_NVM_RESET_LVL_M); + dev_dbg(dev, "Firmware reported required reset level as %u\n", + *reset_level); + } else { + *reset_level = ICE_AQC_NVM_POR_FLAG; + dev_dbg(dev, "Firmware doesn't support indicating required reset level. Assuming a power cycle is required\n"); + } + } + return 0; } @@ -354,6 +389,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, * @component: the name of the component being updated * @image: buffer of image data to write to the NVM * @length: length of the buffer + * @reset_level: storage for reset level required * @extack: netlink extended ACK structure * * Loop over the data for a given NVM module and program it in 4 Kb @@ -366,7 +402,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, */ static int ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component, - const u8 *image, u32 length, + const u8 *image, u32 length, u8 *reset_level, struct netlink_ext_ack *extack) { struct device *dev = ice_pf_to_dev(pf); @@ -400,7 +436,8 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component, memcpy(block, image + offset, block_size); err = ice_write_one_nvm_block(pf, module, offset, block_size, - block, last_cmd, extack); + block, last_cmd, reset_level, + extack); if (err) break; @@ -423,6 +460,11 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component, return err; } +/* Length in seconds to wait before timing out when erasing a flash module. + * Yes, erasing really can take minutes to complete. + */ +#define ICE_FW_ERASE_TIMEOUT 300 + /** * ice_erase_nvm_module - Erase an NVM module and await firmware completion * @pf: the PF data structure @@ -446,7 +488,6 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; struct devlink *devlink; - enum ice_status status; int err; dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module); @@ -455,20 +496,19 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, devlink = priv_to_devlink(pf); - devlink_flash_update_status_notify(devlink, "Erasing", component, 0, 0); + devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT); - status = ice_aq_erase_nvm(hw, module, NULL); - if (status) { - dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n", - component, module, ice_stat_str(status), + err = ice_aq_erase_nvm(hw, module, NULL); + if (err) { + dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n", + component, module, err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module"); err = -EIO; goto out_notify_devlink; } - /* Yes, this really can take minutes to complete */ - err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, 300 * HZ, &event); + err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, ICE_FW_ERASE_TIMEOUT * HZ, &event); if (err) { dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n", component, module, err); @@ -513,6 +553,7 @@ out_notify_devlink: * ice_switch_flash_banks - Tell firmware to switch NVM banks * @pf: Pointer to the PF data structure * @activate_flags: flags used for the activation command + * @emp_reset_available: on return, 
indicates if EMP reset is available * @extack: netlink extended ACK structure * * Notify firmware to activate the newly written flash banks, and wait for the @@ -520,27 +561,44 @@ out_notify_devlink: * * Returns: zero on success or an error code on failure. */ -static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, - struct netlink_ext_ack *extack) +static int +ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, + u8 *emp_reset_available, struct netlink_ext_ack *extack) { struct device *dev = ice_pf_to_dev(pf); struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; - enum ice_status status; u16 completion_retval; + u8 response_flags; int err; memset(&event, 0, sizeof(event)); - status = ice_nvm_write_activate(hw, activate_flags); - if (status) { - dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + err = ice_nvm_write_activate(hw, activate_flags, &response_flags); + if (err) { + dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks"); return -EIO; } + /* Newer versions of firmware have support to indicate whether an EMP + * reset to reload firmware is available. For older firmware, EMP + * reset is always available. + */ + if (emp_reset_available) { + if (hw->dev_caps.common_cap.reset_restrict_support) { + *emp_reset_available = + response_flags & ICE_AQC_NVM_EMPR_ENA; + dev_dbg(dev, "Firmware indicated that EMP reset is %s\n", + *emp_reset_available ? + "available" : "not available"); + } else { + *emp_reset_available = ICE_AQC_NVM_EMPR_ENA; + dev_dbg(dev, "Firmware does not support restricting EMP reset availability\n"); + } + } + /* In most cases, we expect firmware to respond with a completion * within a few milliseconds. However, it has been observed in * practice that firmware may sometimes take longer. 
The wait time @@ -550,8 +608,8 @@ static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30*HZ, &event); if (err) { - dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n", - err); + ice_dev_err_errno(dev, err, + "Timed out waiting for firmware to switch active flash banks"); NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware"); return err; } @@ -587,6 +645,7 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component) struct netlink_ext_ack *extack = priv->extack; struct ice_pf *pf = priv->pf; const char *name; + u8 *reset_level; u16 module; u8 flag; int err; @@ -595,16 +654,19 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component) case NVM_COMP_ID_OROM: module = ICE_SR_1ST_OROM_BANK_PTR; flag = ICE_AQC_NVM_ACTIV_SEL_OROM; + reset_level = NULL; name = "fw.undi"; break; case NVM_COMP_ID_NVM: module = ICE_SR_1ST_NVM_BANK_PTR; flag = ICE_AQC_NVM_ACTIV_SEL_NVM; + reset_level = &priv->reset_level; name = "fw.mgmt"; break; case NVM_COMP_ID_NETLIST: module = ICE_SR_NETLIST_BANK_PTR; flag = ICE_AQC_NVM_ACTIV_SEL_NETLIST; + reset_level = NULL; name = "fw.netlist"; break; default: @@ -624,7 +686,8 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component) return err; return ice_write_nvm_module(pf, module, name, component->component_data, - component->component_size, extack); + component->component_size, reset_level, + extack); } /** @@ -642,33 +705,130 @@ static int ice_finalize_update(struct pldmfw *context) struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context); struct netlink_ext_ack *extack = priv->extack; struct ice_pf *pf = priv->pf; + struct devlink *devlink; int err; /* Finally, notify firmware to activate the written NVM banks */ - err = ice_switch_flash_banks(pf, priv->activate_flags, extack); + err = ice_switch_flash_banks(pf, priv->activate_flags, + &priv->emp_reset_available, extack); if (err) return err; - /* Perform an immediate reset only if PRESERVE_ALL is selected */ - if ((priv->activate_flags & ICE_AQC_NVM_PRESERVATION_M) == ICE_AQC_NVM_PRESERVE_ALL) { - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; - enum ice_status status; + devlink = priv_to_devlink(pf); - status = ice_aq_nvm_update_empr(hw); - if (status) { - dev_err(dev, "Failed to trigger immediate device reset, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - NL_SET_ERR_MSG_MOD(extack, "Failed to trigger immediate device reset"); - return -EIO; - } + /* If the required reset is EMPR, but EMPR is disabled, report that + * a reboot is required instead. 
+ */ + if (priv->reset_level == ICE_AQC_NVM_EMPR_FLAG && + !priv->emp_reset_available) { + dev_dbg(ice_pf_to_dev(pf), "Firmware indicated EMP reset as sufficient, but EMP reset is disabled\n"); + priv->reset_level = ICE_AQC_NVM_PERST_FLAG; } + switch (priv->reset_level) { + case ICE_AQC_NVM_EMPR_FLAG: + devlink_flash_update_status_notify(devlink, + "Activate new firmware by devlink reload", + NULL, 0, 0); + break; + case ICE_AQC_NVM_PERST_FLAG: + devlink_flash_update_status_notify(devlink, + "Activate new firmware by rebooting the system", + NULL, 0, 0); + break; + case ICE_AQC_NVM_POR_FLAG: + default: + devlink_flash_update_status_notify(devlink, + "Activate new firmware by power cycling the system", + NULL, 0, 0); + break; + } + + pf->fw_emp_reset_disabled = !priv->emp_reset_available; + return 0; } -static const struct pldmfw_ops ice_fwu_ops = { +/* these are u32 so that we can store PCI_ANY_ID */ +struct ice_pldm_pci_record_id { + int vendor; + int device; + int subsystem_vendor; + int subsystem_device; +}; + +/** + * ice_op_pci_match_record - Check if a PCI device matches the record + * @context: PLDM fw update structure + * @record: list of records extracted from the PLDM image + * + * Determine if the PCI device associated with this device matches the record + * data provided. + * + * Searches the descriptor TLVs and extracts the relevant descriptor data into + * a pldm_pci_record_id. This is then compared against the PCI device ID + * information. + * + * Returns: true if the device matches the record, false otherwise. + */ +static bool ice_op_pci_match_record(struct pldmfw *context, + struct pldmfw_record *record) +{ + struct pci_dev *pdev = to_pci_dev(context->dev); + struct ice_pldm_pci_record_id id = { + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .subsystem_vendor = PCI_ANY_ID, + .subsystem_device = PCI_ANY_ID, + }; + struct pldmfw_desc_tlv *desc; + + list_for_each_entry(desc, &record->descs, entry) { + u16 value; + int *ptr; + + switch (desc->type) { + case PLDM_DESC_ID_PCI_VENDOR_ID: + ptr = &id.vendor; + break; + case PLDM_DESC_ID_PCI_DEVICE_ID: + ptr = &id.device; + break; + case PLDM_DESC_ID_PCI_SUBVENDOR_ID: + ptr = &id.subsystem_vendor; + break; + case PLDM_DESC_ID_PCI_SUBDEV_ID: + ptr = &id.subsystem_device; + break; + default: + /* Skip unrelated TLVs */ + continue; + } + + value = get_unaligned_le16(desc->data); + /* A value of zero for one of the descriptors is sometimes + * used when the record should ignore this field when matching + * device. For example if the record applies to any subsystem + * device or vendor. 
+ */ + if (value) + *ptr = (int)value; + else + *ptr = PCI_ANY_ID; + } + + /* the E822 device can have a generic device ID so check for that */ + if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) && + (id.device == PCI_ANY_ID || id.device == pdev->device || + id.device == ICE_DEV_ID_E822_SI_DFLT) && + (id.subsystem_vendor == PCI_ANY_ID || id.subsystem_vendor == pdev->subsystem_vendor) && + (id.subsystem_device == PCI_ANY_ID || id.subsystem_device == pdev->subsystem_device)) + return true; + else + return false; +} + +static const struct pldmfw_ops ice_fwu_ops_e810 = { .match_record = &pldmfw_op_pci_match_record, .send_package_data = &ice_send_package_data, .send_component_table = &ice_send_component_table, @@ -676,97 +836,31 @@ static const struct pldmfw_ops ice_fwu_ops = { .finalize_update = &ice_finalize_update, }; -/** - * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device - * @pf: private device driver structure - * @fw: firmware object pointing to the relevant firmware file - * @preservation: preservation level to request from firmware - * @extack: netlink extended ACK structure - * - * Parse the data for a given firmware file, verifying that it is a valid PLDM - * formatted image that matches this device. - * - * Extract the device record Package Data and Component Tables and send them - * to the firmware. Extract and write the flash data for each of the three - * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify - * firmware once the data is written to the inactive banks. - * - * Returns: zero on success or a negative error code on failure. - */ -int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw, - u8 preservation, struct netlink_ext_ack *extack) -{ - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; - struct ice_fwu_priv priv; - enum ice_status status; - int err; - - switch (preservation) { - case ICE_AQC_NVM_PRESERVE_ALL: - case ICE_AQC_NVM_PRESERVE_SELECTED: - case ICE_AQC_NVM_NO_PRESERVATION: - case ICE_AQC_NVM_FACTORY_DEFAULT: - break; - default: - WARN(1, "Unexpected preservation level request %u", preservation); - return -EINVAL; - } - - memset(&priv, 0, sizeof(priv)); - - priv.context.ops = &ice_fwu_ops; - priv.context.dev = dev; - priv.extack = extack; - priv.pf = pf; - priv.activate_flags = preservation; - - status = ice_acquire_nvm(hw, ICE_RES_WRITE); - if (status) { - dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); - return -EIO; - } - - err = pldmfw_flash_image(&priv.context, fw); - if (err == -ENOENT) { - dev_err(dev, "Firmware image has no record matching this device\n"); - NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device"); - } else if (err) { - /* Do not set a generic extended ACK message here. A more - * specific message may already have been set by one of our - * ops. 
- */ - dev_err(dev, "Failed to flash PLDM image, err %d", err); - } - - ice_release_nvm(hw); - - return err; -} +static const struct pldmfw_ops ice_fwu_ops_e822 = { + .match_record = &ice_op_pci_match_record, + .send_package_data = &ice_send_package_data, + .send_component_table = &ice_send_component_table, + .flash_component = &ice_flash_component, + .finalize_update = &ice_finalize_update, +}; /** - * ice_check_for_pending_update - Check for a pending flash update + * ice_get_pending_updates - Check if the component has a pending update * @pf: the PF driver structure - * @component: if not NULL, the name of the component being updated - * @extack: Netlink extended ACK structure + * @pending: on return, bitmap of updates pending + * @extack: Netlink extended ACK * - * Check whether the device already has a pending flash update. If such an - * update is found, cancel it so that the requested update may proceed. + * Check if the device has any pending updates on any flash components. * - * Returns: zero on success, or a negative error code on failure. + * Returns: zero on success, or a negative error code on failure. Updates + * pending with the bitmap of pending updates. */ -int ice_check_for_pending_update(struct ice_pf *pf, const char *component, - struct netlink_ext_ack *extack) +int ice_get_pending_updates(struct ice_pf *pf, u8 *pending, + struct netlink_ext_ack *extack) { - struct devlink *devlink = priv_to_devlink(pf); struct device *dev = ice_pf_to_dev(pf); struct ice_hw_dev_caps *dev_caps; struct ice_hw *hw = &pf->hw; - enum ice_status status; - u8 pending = 0; int err; dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL); @@ -778,30 +872,59 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component, * may have changed, e.g. if an update was previously completed and * the system has not yet rebooted. */ - status = ice_discover_dev_caps(hw, dev_caps); - if (status) { + err = ice_discover_dev_caps(hw, dev_caps); + if (err) { NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities"); kfree(dev_caps); - return -EIO; + return err; } + *pending = 0; + if (dev_caps->common_cap.nvm_update_pending_nvm) { dev_info(dev, "The fw.mgmt flash component has a pending update\n"); - pending |= ICE_AQC_NVM_ACTIV_SEL_NVM; + *pending |= ICE_AQC_NVM_ACTIV_SEL_NVM; } if (dev_caps->common_cap.nvm_update_pending_orom) { dev_info(dev, "The fw.undi flash component has a pending update\n"); - pending |= ICE_AQC_NVM_ACTIV_SEL_OROM; + *pending |= ICE_AQC_NVM_ACTIV_SEL_OROM; } if (dev_caps->common_cap.nvm_update_pending_netlist) { dev_info(dev, "The fw.netlist flash component has a pending update\n"); - pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST; + *pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST; } kfree(dev_caps); + return 0; +} + +/** + * ice_cancel_pending_update - Cancel any pending update for a component + * @pf: the PF driver structure + * @component: if not NULL, the name of the component being updated + * @extack: Netlink extended ACK structure + * + * Cancel any pending update for the specified component. If component is + * NULL, all device updates will be canceled. + * + * Returns: zero on success, or a negative error code on failure. 
+ */ +static int +ice_cancel_pending_update(struct ice_pf *pf, const char *component, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct ice_hw *hw = &pf->hw; + u8 pending; + int err; + + err = ice_get_pending_updates(pf, &pending, extack); + if (err) + return err; + /* If the flash_update request is for a specific component, ignore all * of the other components. */ @@ -827,17 +950,141 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component, "Canceling previous pending update", component, 0, 0); - status = ice_acquire_nvm(hw, ICE_RES_WRITE); - if (status) { - dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + err = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (err) { + dev_err(ice_pf_to_dev(pf), "Failed to acquire device flash lock, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); - return -EIO; + return err; } pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV; - err = ice_switch_flash_banks(pf, pending, extack); + err = ice_switch_flash_banks(pf, pending, NULL, extack); + + ice_release_nvm(hw); + + /* Since we've canceled the pending update, we no longer know if EMP + * reset is restricted. + */ + pf->fw_emp_reset_disabled = false; + + return err; +} + +/** + * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device + * @devlink: pointer to devlink associated with the device to update + * @params: devlink flash update parameters + * @extack: netlink extended ACK structure + * + * Parse the data for a given firmware file, verifying that it is a valid PLDM + * formatted image that matches this device. + * + * Extract the device record Package Data and Component Tables and send them + * to the firmware. Extract and write the flash data for each of the three + * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify + * firmware once the data is written to the inactive banks. + * + * Returns: zero on success or a negative error code on failure. + */ +int ice_flash_pldm_image(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; +#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW + const struct firmware *fw; +#endif + struct ice_fwu_priv priv; + u8 preservation; + int err; + + if (ice_get_fw_mode(hw) == ICE_FW_MODE_REC) { + /* The devlink flash update process does not currently support + * updating when in recovery mode. + */ + NL_SET_ERR_MSG_MOD(extack, "Device firmware is in recovery mode. Unable to perform flash update."); + return -EOPNOTSUPP; + } + + switch (params->overwrite_mask) { + case 0: + /* preserve all settings and identifiers */ + preservation = ICE_AQC_NVM_PRESERVE_ALL; + break; + case DEVLINK_FLASH_OVERWRITE_SETTINGS: + /* overwrite settings, but preserve vital information such as + * device identifiers. 
+ */ + preservation = ICE_AQC_NVM_PRESERVE_SELECTED; + break; + case (DEVLINK_FLASH_OVERWRITE_SETTINGS | + DEVLINK_FLASH_OVERWRITE_IDENTIFIERS): + /* overwrite both settings and identifiers, preserve nothing */ + preservation = ICE_AQC_NVM_NO_PRESERVATION; + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported"); + return -EOPNOTSUPP; + } + + if (!hw->dev_caps.common_cap.nvm_unified_update) { + NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update"); + return -EOPNOTSUPP; + } + + memset(&priv, 0, sizeof(priv)); + + /* the E822 device needs a slightly different ops */ + if (hw->mac_type == ICE_MAC_GENERIC) + priv.context.ops = &ice_fwu_ops_e822; + else + priv.context.ops = &ice_fwu_ops_e810; + priv.context.dev = dev; + priv.extack = extack; + priv.pf = pf; + priv.activate_flags = preservation; + + devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0); + + err = ice_cancel_pending_update(pf, NULL, extack); + if (err) + return err; + + err = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (err) { + dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); + return err; + } + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW + err = pldmfw_flash_image(&priv.context, params->fw); +#else + err = request_firmware(&fw, params->file_name, dev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk"); + ice_release_nvm(hw); + return err; + } + + err = pldmfw_flash_image(&priv.context, fw); + + release_firmware(fw); +#endif + if (err == -ENOENT) { + dev_err(dev, "Firmware image has no record matching this device\n"); + NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device"); + } else if (err) { + /* Do not set a generic extended ACK message here. A more + * specific message may already have been set by one of our + * ops. 
+ */ + dev_err(dev, "Failed to flash PLDM image, err %d", err); + } ice_release_nvm(hw); diff --git a/drivers/thirdparty/ice/ice_fw_update.h b/drivers/thirdparty/ice/ice_fw_update.h index 0e083c0f9695..2148906d0dfe 100644 --- a/drivers/thirdparty/ice/ice_fw_update.h +++ b/drivers/thirdparty/ice/ice_fw_update.h @@ -4,9 +4,13 @@ #ifndef _ICE_FW_UPDATE_H_ #define _ICE_FW_UPDATE_H_ -int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw, - u8 preservation, struct netlink_ext_ack *extack); -int ice_check_for_pending_update(struct ice_pf *pf, const char *component, - struct netlink_ext_ack *extack); +int ice_flash_pldm_image(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack); +int ice_get_pending_updates(struct ice_pf *pf, u8 *pending, + struct netlink_ext_ack *extack); +int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, + u16 block_size, u8 *block, bool last_cmd, + u8 *reset_level, struct netlink_ext_ack *extack); #endif diff --git a/drivers/thirdparty/ice/ice_fwlog.c b/drivers/thirdparty/ice/ice_fwlog.c index cee8cf7ef36b..3f6ac43145e6 100644 --- a/drivers/thirdparty/ice/ice_fwlog.c +++ b/drivers/thirdparty/ice/ice_fwlog.c @@ -15,7 +15,7 @@ static void cache_cfg(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) } /** - * valid_module_entries - validate all the module entry IDs and log levels + * valid_module_entries - validate all the module entry IDs and log levels * @hw: pointer to the HW structure * @entries: entries to validate * @num_entries: number of entries to validate @@ -92,11 +92,11 @@ static bool valid_cfg(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) * ice_init_hw(). Firmware logging will be configured based on these settings * and also the PF will be registered on init. */ -enum ice_status +int ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) { if (!valid_cfg(hw, cfg)) - return ICE_ERR_PARAM; + return -EINVAL; cache_cfg(hw, cfg); @@ -111,20 +111,20 @@ ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) * @options: options from ice_fwlog_cfg->options structure * @log_resolution: logging resolution */ -static enum ice_status +static int ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries, u16 num_entries, u16 options, u16 log_resolution) { struct ice_aqc_fw_log_cfg_resp *fw_modules; struct ice_aqc_fw_log *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; u16 i; fw_modules = devm_kcalloc(ice_hw_to_dev(hw), num_entries, sizeof(*fw_modules), GFP_KERNEL); if (!fw_modules) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_entries; i++) { fw_modules[i].module_identifier = @@ -180,16 +180,16 @@ bool ice_fwlog_supported(struct ice_hw *hw) * ice_fwlog_register. Note, that ice_fwlog_register does not need to be called * for init. */ -enum ice_status +int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) { - enum ice_status status; + int status; if (!ice_fwlog_supported(hw)) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; if (!valid_cfg(hw, cfg)) - return ICE_ERR_PARAM; + return -EINVAL; status = ice_aq_fwlog_set(hw, cfg->module_entries, ICE_AQC_FW_LOG_ID_MAX, cfg->options, @@ -240,23 +240,23 @@ update_cached_entries(struct ice_hw *hw, struct ice_fwlog_module_entry *entries, * Only the entries passed in will be affected. All other firmware logging * settings will be unaffected. 
*/ -enum ice_status +int ice_fwlog_update_modules(struct ice_hw *hw, struct ice_fwlog_module_entry *entries, u16 num_entries) { struct ice_fwlog_cfg *cfg; - enum ice_status status; + int status; if (!ice_fwlog_supported(hw)) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; if (!valid_module_entries(hw, entries, num_entries)) - return ICE_ERR_PARAM; + return -EINVAL; cfg = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*cfg), GFP_KERNEL); if (!cfg) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_fwlog_get(hw, cfg); if (status) @@ -273,11 +273,11 @@ status_out: } /** - *ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31) + * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31) * @hw: pointer to the HW structure * @reg: true to register and false to unregister */ -static enum ice_status ice_aq_fwlog_register(struct ice_hw *hw, bool reg) +static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg) { struct ice_aq_desc desc; @@ -296,12 +296,12 @@ static enum ice_status ice_aq_fwlog_register(struct ice_hw *hw, bool reg) * After this call the PF will start to receive firmware logging based on the * configuration set in ice_fwlog_set. */ -enum ice_status ice_fwlog_register(struct ice_hw *hw) +int ice_fwlog_register(struct ice_hw *hw) { - enum ice_status status; + int status; if (!ice_fwlog_supported(hw)) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; status = ice_aq_fwlog_register(hw, true); if (status) @@ -316,12 +316,12 @@ enum ice_status ice_fwlog_register(struct ice_hw *hw) * ice_fwlog_unregister - Unregister the PF from firmware logging * @hw: pointer to the HW structure */ -enum ice_status ice_fwlog_unregister(struct ice_hw *hw) +int ice_fwlog_unregister(struct ice_hw *hw) { - enum ice_status status; + int status; if (!ice_fwlog_supported(hw)) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; status = ice_aq_fwlog_register(hw, false); if (status) @@ -337,14 +337,14 @@ enum ice_status ice_fwlog_unregister(struct ice_hw *hw) * @hw: pointer to the HW structure * @cfg: firmware logging configuration to populate */ -static enum ice_status +static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) { struct ice_aqc_fw_log_cfg_resp *fw_modules; struct ice_aqc_fw_log *cmd; struct ice_aq_desc desc; - enum ice_status status; u16 i, module_id_cnt; + int status; void *buf; memset(cfg, 0, sizeof(*cfg)); @@ -352,7 +352,7 @@ ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) buf = devm_kcalloc(ice_hw_to_dev(hw), 1, ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query); cmd = &desc.params.fw_log; @@ -411,7 +411,7 @@ status_out: void ice_fwlog_set_support_ena(struct ice_hw *hw) { struct ice_fwlog_cfg *cfg; - enum ice_status status; + int status; hw->fwlog_support_ena = false; @@ -438,16 +438,16 @@ void ice_fwlog_set_support_ena(struct ice_hw *hw) * @hw: pointer to the HW structure * @cfg: config to populate based on current firmware logging settings */ -enum ice_status +int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) { - enum ice_status status; + int status; if (!ice_fwlog_supported(hw)) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; if (!cfg) - return ICE_ERR_PARAM; + return -EINVAL; status = ice_aq_fwlog_get(hw, cfg); if (status) diff --git a/drivers/thirdparty/ice/ice_fwlog.h b/drivers/thirdparty/ice/ice_fwlog.h index 0914cb7c627b..54c68d9caf29 100644 --- a/drivers/thirdparty/ice/ice_fwlog.h +++ 
b/drivers/thirdparty/ice/ice_fwlog.h @@ -43,20 +43,20 @@ struct ice_fwlog_cfg { /* options used to configure firmware logging */ u16 options; /* minimum number of log events sent per Admin Receive Queue event */ - u8 log_resolution; + u16 log_resolution; }; void ice_fwlog_set_support_ena(struct ice_hw *hw); bool ice_fwlog_supported(struct ice_hw *hw); -enum ice_status ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); -enum ice_status ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); -enum ice_status ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); -enum ice_status +int ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); +int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); +int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); +int ice_fwlog_update_modules(struct ice_hw *hw, struct ice_fwlog_module_entry *entries, u16 num_entries); -enum ice_status ice_fwlog_register(struct ice_hw *hw); -enum ice_status ice_fwlog_unregister(struct ice_hw *hw); +int ice_fwlog_register(struct ice_hw *hw); +int ice_fwlog_unregister(struct ice_hw *hw); void ice_fwlog_event_dump(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); #endif /* _ICE_FWLOG_H_ */ diff --git a/drivers/thirdparty/ice/ice_gnss.c b/drivers/thirdparty/ice/ice_gnss.c new file mode 100644 index 000000000000..f38cbe89ad9a --- /dev/null +++ b/drivers/thirdparty/ice/ice_gnss.c @@ -0,0 +1,575 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" + +/** + * ice_gnss_do_write - Write data to internal GNSS + * @pf: board private structure + * @buf: command buffer + * @size: command buffer size + * + * Write UBX command data to the GNSS receiver + */ +static unsigned int +ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size) +{ + struct ice_aqc_link_topo_addr link_topo; + struct ice_hw *hw = &pf->hw; + unsigned int offset = 0; + int err; + + memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr)); + link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS; + link_topo.topo_params.node_type_ctx |= + ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE << + ICE_AQC_LINK_TOPO_NODE_CTX_S; + + /* It's not possible to write a single byte to u-blox. + * Write all bytes in a loop until there are 6 or less bytes left. If + * there are exactly 6 bytes left, the last write would be only a byte. + * In this case, do 4+2 bytes writes instead of 5+1. Otherwise, do the + * last 2 to 5 bytes write. + */ + while (size - offset > ICE_GNSS_UBX_WRITE_BYTES + 1) { + err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR, + cpu_to_le16(buf[offset]), + ICE_MAX_I2C_WRITE_BYTES, + &buf[offset + 1], NULL); + if (err) + goto exit; + + offset += ICE_GNSS_UBX_WRITE_BYTES; + } + + /* Single byte would be written. Write 4 bytes instead of 5. */ + if (size - offset == ICE_GNSS_UBX_WRITE_BYTES + 1) { + err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR, + cpu_to_le16(buf[offset]), + ICE_MAX_I2C_WRITE_BYTES - 1, + &buf[offset + 1], NULL); + if (err) + goto exit; + + offset += ICE_GNSS_UBX_WRITE_BYTES - 1; + } + + /* Do the last write, 2 to 5 bytes. 
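The chunking rule spelled out in the comment above is easier to see with concrete sizes. The following standalone userspace sketch (illustrative only, not driver code) prints the per-call write sizes that the loop in ice_gnss_do_write() produces, assuming the 5-byte maximum per AQ I2C write implied by ICE_MAX_I2C_WRITE_BYTES (4 data bytes plus the address byte) in ice_gnss.h later in this patch.

/* chunk_sizes.c - userspace illustration of the UBX write chunking
 * policy: full 5-byte writes while more than 6 bytes remain, a 4+2
 * split when exactly 6 remain (a trailing 1-byte write is not
 * possible), otherwise a final 2..5 byte write.
 */
#include <stdio.h>

#define UBX_WRITE_BYTES 5	/* 1 address byte + 4 data bytes */

static void print_chunks(unsigned int size)
{
	unsigned int offset = 0;

	printf("%2u bytes:", size);

	while (size - offset > UBX_WRITE_BYTES + 1) {
		printf(" 5");
		offset += UBX_WRITE_BYTES;
	}

	if (size - offset == UBX_WRITE_BYTES + 1) {
		/* exactly 6 left: write 4 now, 2 in the final call */
		printf(" 4");
		offset += UBX_WRITE_BYTES - 1;
	}

	printf(" %u\n", size - offset);	/* final 2..5 byte write */
}

int main(void)
{
	unsigned int sizes[] = { 2, 5, 6, 7, 11, 16 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		print_chunks(sizes[i]);

	return 0;
}

For example, a 16-byte command goes out as 5 + 5 + 4 + 2 bytes rather than 5 + 5 + 5 + 1, since the receiver cannot accept a single-byte write.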
*/ + err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR, + cpu_to_le16(buf[offset]), size - offset - 1, + &buf[offset + 1], NULL); + if (!err) + offset = size; + +exit: + if (err) + dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, status=%d\n", + offset, size, err); + + return offset; +} + +/** + * ice_gnss_write_pending - Write all pending data to internal GNSS + * @work: GNSS write work structure + */ +static void ice_gnss_write_pending(struct kthread_work *work) +{ + struct gnss_serial *gnss = container_of(work, struct gnss_serial, + write_work); + struct ice_pf *pf = gnss->back; + + if (!list_empty(&gnss->queue)) { + struct gnss_write_buf *write_buf = NULL; + unsigned int bytes; + + write_buf = list_first_entry(&gnss->queue, + struct gnss_write_buf, queue); + + bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size); + dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes); + + list_del(&write_buf->queue); + kfree(write_buf->buf); + kfree(write_buf); + } +} + +/** + * ice_gnss_read - Read data from internal GNSS module + * @work: GNSS read work structure + * + * Read the data from internal GNSS receiver, number of bytes read will be + * returned in *read_data parameter. + */ +static void ice_gnss_read(struct kthread_work *work) +{ + struct gnss_serial *gnss = container_of(work, struct gnss_serial, + read_work.work); + struct ice_aqc_link_topo_addr link_topo; + unsigned int i, bytes_read, data_len; + struct tty_port *port; + struct ice_pf *pf; + struct ice_hw *hw; + __be16 data_len_b; + char *buf = NULL; + u8 i2c_params; + int err = 0; + + pf = gnss->back; + if (!pf || !gnss->tty || !gnss->tty->port) + return; + + hw = &pf->hw; + port = gnss->tty->port; + + buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto exit; + } + + memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr)); + link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS; + link_topo.topo_params.node_type_ctx |= + ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE << + ICE_AQC_LINK_TOPO_NODE_CTX_S; + + i2c_params = ICE_GNSS_UBX_DATA_LEN_WIDTH | + ICE_AQC_I2C_USE_REPEATED_START; + + /* Read data length in a loop, when it's not 0 the data is ready */ + for (i = 0; i < ICE_MAX_UBX_READ_TRIES; i++) { + err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR, + cpu_to_le16(ICE_GNSS_UBX_DATA_LEN_H), + i2c_params, (u8 *)&data_len_b, NULL); + if (err) + goto exit_buf; + + data_len = be16_to_cpu(data_len_b); + if (data_len != 0 && data_len != U16_MAX) + break; + + msleep(20); + } + + data_len = min_t(typeof(data_len), data_len, PAGE_SIZE); + data_len = tty_buffer_request_room(port, data_len); + if (!data_len) { + err = -ENOMEM; + goto exit_buf; + } + + /* Read received data */ + for (i = 0; i < data_len; i += bytes_read) { + unsigned int bytes_left = data_len - i; + + bytes_read = min_t(typeof(bytes_left), bytes_left, + ICE_MAX_I2C_DATA_SIZE); + + err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR, + cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA), + bytes_read, &buf[i], NULL); + if (err) + goto exit_buf; + } + + /* Send the data to the tty layer for users to read. This doesn't + * actually push the data through unless tty->low_latency is set. 
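The poll loop above keys off the u-blox data-length registers (ICE_GNSS_UBX_DATA_LEN_H at 0xFD plus the following byte), which report the number of buffered bytes in big-endian order; 0x0000 means nothing is buffered yet and 0xFFFF is treated as an invalid read, so either value keeps the loop spinning. A standalone, illustrative decode of that register pair:

/* gnss_len.c - userspace sketch of the data-length interpretation in
 * ice_gnss_read(): big-endian 16-bit count, with 0 and 0xFFFF both
 * meaning "keep polling".
 */
#include <stdio.h>
#include <stdint.h>

static int gnss_len_ready(const uint8_t reg[2], uint16_t *len)
{
	*len = (uint16_t)((reg[0] << 8) | reg[1]);	/* be16_to_cpu() */

	return *len != 0 && *len != UINT16_MAX;
}

int main(void)
{
	const uint8_t samples[][2] = {
		{ 0x00, 0x00 },	/* nothing buffered yet */
		{ 0xFF, 0xFF },	/* invalid read         */
		{ 0x00, 0x54 },	/* 84 bytes available   */
		{ 0x01, 0x00 },	/* 256 bytes available  */
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint16_t len;
		int ready = gnss_len_ready(samples[i], &len);

		printf("raw %02X %02X -> %5u bytes, %s\n",
		       samples[i][0], samples[i][1], len,
		       ready ? "ready" : "keep polling");
	}

	return 0;
}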
+ */ + tty_insert_flip_string(port, buf, i); + tty_flip_buffer_push(port); + +exit_buf: + free_page((unsigned long)buf); + kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, + ICE_GNSS_TIMER_DELAY_TIME); +exit: + if (err) + dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err); +} + +/** + * ice_gnss_struct_init - Initialize GNSS structure for the TTY + * @pf: Board private structure + * @index: TTY device index + */ +static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index) +{ + struct device *dev = ice_pf_to_dev(pf); + struct kthread_worker *kworker; + struct gnss_serial *gnss; + + gnss = kzalloc(sizeof(*gnss), GFP_KERNEL); + if (!gnss) + return NULL; + + mutex_init(&gnss->gnss_mutex); + gnss->open_count = 0; + gnss->back = pf; + pf->gnss_serial = gnss; + + kthread_init_delayed_work(&gnss->read_work, ice_gnss_read); + INIT_LIST_HEAD(&gnss->queue); + kthread_init_work(&gnss->write_work, ice_gnss_write_pending); + /* Allocate a kworker for handling work required for the GNSS TTY + * writes. + */ + kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev)); + if (IS_ERR(kworker)) { + kfree(gnss); + return NULL; + } + + gnss->kworker = kworker; + + return gnss; +} + +/** + * ice_gnss_tty_open - Initialize GNSS structures on TTY device open + * @tty: pointer to the tty_struct + * @filp: pointer to the file + * + * This routine is mandatory. If this routine is not filled in, the attempted + * open will fail with ENODEV. + */ +static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp) +{ + struct gnss_serial *gnss; + struct ice_pf *pf; + + pf = (struct ice_pf *)tty->driver->driver_state; + if (!pf) + return -EFAULT; + + /* Clear the pointer in case something fails */ + tty->driver_data = NULL; + /* Get the serial object associated with this tty pointer */ + gnss = pf->gnss_serial; + + if (!gnss) { + /* Initialize GNSS struct on the first device open */ + gnss = ice_gnss_struct_init(pf, tty->index); + if (!gnss) + return -ENOMEM; + } + + mutex_lock(&gnss->gnss_mutex); + + /* Save our structure within the tty structure */ + tty->driver_data = gnss; + gnss->tty = tty; + gnss->open_count++; + kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, 0); + + mutex_unlock(&gnss->gnss_mutex); + + return 0; +} + +/** + * ice_gnss_tty_close - Cleanup GNSS structures on tty device close + * @tty: pointer to the tty_struct + * @filp: pointer to the file + */ +static void ice_gnss_tty_close(struct tty_struct *tty, struct file *filp) +{ + struct gnss_serial *gnss = tty->driver_data; + struct ice_pf *pf; + + if (!gnss) + return; + + pf = (struct ice_pf *)tty->driver->driver_state; + if (!pf) + return; + + mutex_lock(&gnss->gnss_mutex); + + if (!gnss->open_count) { + /* Port was never opened */ + dev_err(ice_pf_to_dev(pf), "GNSS port not opened\n"); + goto exit; + } + + gnss->open_count--; + if (gnss->open_count <= 0) { + /* Port is in shutdown state */ + kthread_cancel_delayed_work_sync(&gnss->read_work); + } +exit: + mutex_unlock(&gnss->gnss_mutex); +} + +/** + * ice_gnss_tty_write - Write GNSS data + * @tty: pointer to the tty_struct + * @buf: pointer to the user data + * @count: the number of characters that was able to be sent to the hardware (or + * queued to be sent at a later time) + * + * The write function call is called by the user when there is data to be sent + * to the hardware. First the tty core receives the call, and then it passes the + * data on to the tty driver’s write function. 
The tty core also tells the tty + * driver the size of the data being sent. + * If any errors happen during the write call, a negative error value should be + * returned instead of the number of characters that were written. + */ +static int +ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + struct gnss_write_buf *write_buf; + struct gnss_serial *gnss; + unsigned char *cmd_buf; + struct ice_pf *pf; + int err = count; + + /* We cannot write a single byte using our I2C implementation. */ + if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF) + return -EINVAL; + + gnss = tty->driver_data; + if (!gnss) + return -EFAULT; + + pf = (struct ice_pf *)tty->driver->driver_state; + if (!pf) + return -EFAULT; + + mutex_lock(&gnss->gnss_mutex); + + if (!gnss->open_count) { + err = -EINVAL; + goto exit; + } + + cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL); + if (!cmd_buf) { + err = -ENOMEM; + goto exit; + } + + memcpy(cmd_buf, buf, count); + + /* Send the data out to a hardware port */ + write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL); + if (!write_buf) { + err = -ENOMEM; + goto exit; + } + + write_buf->buf = cmd_buf; + write_buf->size = count; + INIT_LIST_HEAD(&write_buf->queue); + list_add_tail(&write_buf->queue, &gnss->queue); + kthread_queue_work(gnss->kworker, &gnss->write_work); +exit: + mutex_unlock(&gnss->gnss_mutex); + return err; +} + +/** + * ice_gnss_tty_write_room - Returns the numbers of characters to be written. + * @tty: pointer to the tty_struct + * + * This routine returns the numbers of characters the tty driver will accept + * for queuing to be written. This number is subject to change as output buffers + * get emptied, or if the output flow control is acted. + */ +#ifdef HAVE_TTY_WRITE_ROOM_UINT +static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty) +#else +static int ice_gnss_tty_write_room(struct tty_struct *tty) +#endif /* !HAVE_TTY_WRITE_ROOM_UINT */ +{ + struct gnss_serial *gnss = tty->driver_data; + + if (!gnss) +#ifndef HAVE_TTY_WRITE_ROOM_UINT + return 0; +#else + return -EFAULT; +#endif /* !HAVE_TTY_WRITE_ROOM_UINT */ + + mutex_lock(&gnss->gnss_mutex); + + if (!gnss->open_count) { + mutex_unlock(&gnss->gnss_mutex); +#ifndef HAVE_TTY_WRITE_ROOM_UINT + return 0; +#else + return -EFAULT; +#endif /* !HAVE_TTY_WRITE_ROOM_UINT */ + } + + mutex_unlock(&gnss->gnss_mutex); + return ICE_GNSS_TTY_WRITE_BUF; +} + +/** + * ice_gnss_tty_set_termios - mock for set_termios tty operations + * @tty: pointer to the tty_struct + * @new_termios: pointer to the new termios parameters + */ +static void +ice_gnss_tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) +{ + /** + * Some 3rd party tools (ex. ubxtool) want to change the TTY parameters. + * In our virtual interface (I2C communication over FW AQ) we don't have + * to change anything, but we need to implement it to unblock tools. 
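The comment above notes that tools such as ubxtool drive this port directly; from userspace it is opened and written like any serial u-blox receiver. A minimal, illustrative client follows; the device path is an example (the real node is ttyGNSS_<bus><slot>, as registered further down in ice_gnss_create_tty_driver()), and the frame is a standard UBX-MON-VER poll (class 0x0A, ID 0x04, empty payload, Fletcher checksum 0x0E 0x34).

/* ubx_poll.c - illustrative userspace client for the GNSS TTY.
 * Sends a UBX-MON-VER poll and dumps whatever comes back.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* sync chars, class, ID, 16-bit length, Fletcher checksum */
	const unsigned char mon_ver[] = {
		0xB5, 0x62, 0x0A, 0x04, 0x00, 0x00, 0x0E, 0x34
	};
	unsigned char resp[256];
	ssize_t n;
	int fd;

	fd = open("/dev/ttyGNSS_1800", O_RDWR | O_NOCTTY); /* example name */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* 8 bytes: within the 2..250 byte window the driver accepts */
	if (write(fd, mon_ver, sizeof(mon_ver)) != sizeof(mon_ver)) {
		perror("write");
		close(fd);
		return 1;
	}

	sleep(1);	/* the driver polls the receiver every 100 ms */

	n = read(fd, resp, sizeof(resp));
	printf("read %zd bytes\n", n);

	close(fd);
	return 0;
}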
+ */ +} + +static const struct tty_operations tty_gps_ops = { + .open = ice_gnss_tty_open, + .close = ice_gnss_tty_close, + .write = ice_gnss_tty_write, + .write_room = ice_gnss_tty_write_room, + .set_termios = ice_gnss_tty_set_termios, +}; + +/** + * ice_gnss_create_tty_driver - Create a TTY driver for GNSS + * @pf: Board private structure + */ +static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + const int ICE_TTYDRV_NAME_MAX = 12; + struct tty_driver *tty_driver; + char *ttydrv_name; + int err; + + tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW); + if (IS_ERR(tty_driver)) { + dev_err(dev, "Failed to allocate memory for GNSS TTY\n"); + return NULL; + } + + ttydrv_name = kzalloc(ICE_TTYDRV_NAME_MAX, GFP_KERNEL); + if (!ttydrv_name) { + tty_driver_kref_put(tty_driver); + return NULL; + } + + snprintf(ttydrv_name, ICE_TTYDRV_NAME_MAX, "ttyGNSS_%02x%02x", + (u8)pf->pdev->bus->number, (u8)PCI_SLOT(pf->pdev->devfn)); + + /* Initialize the tty driver*/ + tty_driver->owner = THIS_MODULE; + tty_driver->driver_name = dev_driver_string(dev); + tty_driver->name = (const char *)ttydrv_name; + tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + tty_driver->subtype = SERIAL_TYPE_NORMAL; + tty_driver->init_termios = tty_std_termios; + tty_driver->init_termios.c_iflag &= ~INLCR; + tty_driver->init_termios.c_iflag |= IGNCR; + tty_driver->init_termios.c_oflag &= ~OPOST; + tty_driver->init_termios.c_lflag &= ~ICANON; + tty_driver->init_termios.c_cflag &= ~(CSIZE | CBAUD | CBAUDEX); + /* baud rate 9600 */ + tty_termios_encode_baud_rate(&tty_driver->init_termios, 9600, 9600); + tty_driver->driver_state = pf; + tty_set_operations(tty_driver, &tty_gps_ops); + + pf->gnss_tty_port = + kzalloc(sizeof(*pf->gnss_tty_port), GFP_KERNEL); + pf->gnss_serial = NULL; + + tty_port_init(pf->gnss_tty_port); + tty_port_link_device(pf->gnss_tty_port, tty_driver, 0); + + err = tty_register_driver(tty_driver); + if (err) { + dev_err(dev, "Failed to register TTY driver err=%d\n", err); + + tty_port_destroy(pf->gnss_tty_port); + kfree(pf->gnss_tty_port); + kfree(ttydrv_name); + tty_driver_kref_put(tty_driver); + + return NULL; + } + + dev_info(dev, "%s registered\n", ttydrv_name); + + return tty_driver; +} + +/** + * ice_gnss_init - Initialize GNSS TTY support + * @pf: Board private structure + */ +void ice_gnss_init(struct ice_pf *pf) +{ + struct tty_driver *tty_driver; + + tty_driver = ice_gnss_create_tty_driver(pf); + if (!tty_driver) + return; + + pf->ice_gnss_tty_driver = tty_driver; + + set_bit(ICE_FLAG_GNSS, pf->flags); + dev_info(ice_pf_to_dev(pf), "GNSS TTY init successful\n"); +} + +/** + * ice_gnss_exit - Disable GNSS TTY support + * @pf: Board private structure + */ +void ice_gnss_exit(struct ice_pf *pf) +{ + if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver) + return; + + if (pf->gnss_tty_port) { + tty_port_destroy(pf->gnss_tty_port); + kfree(pf->gnss_tty_port); + } + + if (pf->gnss_serial) { + struct gnss_serial *gnss = pf->gnss_serial; + + kthread_cancel_work_sync(&gnss->write_work); + kthread_cancel_delayed_work_sync(&gnss->read_work); + kfree(gnss); + pf->gnss_serial = NULL; + } + + tty_unregister_driver(pf->ice_gnss_tty_driver); + kfree(pf->ice_gnss_tty_driver->name); + tty_driver_kref_put(pf->ice_gnss_tty_driver); + pf->ice_gnss_tty_driver = NULL; +} + +/** + * ice_gnss_is_gps_present - Check if GPS HW is present + * @hw: pointer to HW struct + */ +bool ice_gnss_is_gps_present(struct ice_hw *hw) +{ +#if 
IS_ENABLED(CONFIG_PTP_1588_CLOCK) + if (!hw->func_caps.ts_func_info.src_tmr_owned) + return false; + + if (ice_is_pca9575_present(hw)) { + int status; + u8 data; + + status = ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, + &data); + if (status || !!(data & ICE_E810T_P0_GNSS_PRSNT_N)) + return false; + } else { + return false; + } + + return true; +#else /* CONFIG_PTP_1588_CLOCK */ + return false; +#endif /* CONFIG_PTP_1588_CLOCK */ +} diff --git a/drivers/thirdparty/ice/ice_gnss.h b/drivers/thirdparty/ice/ice_gnss.h new file mode 100644 index 000000000000..908861654c98 --- /dev/null +++ b/drivers/thirdparty/ice/ice_gnss.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_GNSS_H_ +#define _ICE_GNSS_H_ + +#include +#include + +#define ICE_E810T_GNSS_I2C_BUS 0x2 +#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */ +#define ICE_GNSS_TTY_WRITE_BUF 250 +#define ICE_MAX_I2C_DATA_SIZE (ICE_AQC_I2C_DATA_SIZE_M >> \ + ICE_AQC_I2C_DATA_SIZE_S) +#define ICE_MAX_I2C_WRITE_BYTES 4 + +/* ublox specific deifinitions */ +#define ICE_GNSS_UBX_I2C_BUS_ADDR 0x42 +/* Data length register is big endian */ +#define ICE_GNSS_UBX_DATA_LEN_H 0xFD +#define ICE_GNSS_UBX_DATA_LEN_WIDTH 2 +#define ICE_GNSS_UBX_EMPTY_DATA 0xFF +/* For ublox writes are performed without address so the first byte to write is + * passed as I2C addr parameter. + */ +#define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1) +#define ICE_MAX_UBX_READ_TRIES 255 +#define ICE_MAX_UBX_ACK_READ_TRIES 4095 + +struct gnss_write_buf { + struct list_head queue; + unsigned int size; + unsigned char *buf; +}; + +/** + * struct gnss_serial - data used to initialize GNSS TTY port + * @back: back pointer to PF + * @tty: pointer to the tty for this device + * @open_count: number of times this port has been opened + * @gnss_mutex: gnss_mutex used to protect GNSS serial operations + * @kworker: kwork thread for handling periodic work + * @read_work: read_work function for handling GNSS reads + * @write_work: write_work function for handling GNSS writes + * @queue: write buffers queue + * @buf: write buffer for a single u8, negative if empty + */ +struct gnss_serial { + struct ice_pf *back; + struct tty_struct *tty; + int open_count; + struct mutex gnss_mutex; /* protects GNSS serial structure */ + struct kthread_worker *kworker; + struct kthread_delayed_work read_work; + struct kthread_work write_work; + struct list_head queue; +}; + +void ice_gnss_init(struct ice_pf *pf); +void ice_gnss_exit(struct ice_pf *pf); +bool ice_gnss_is_gps_present(struct ice_hw *hw); +#endif /* _ICE_GNSS_H_ */ diff --git a/drivers/thirdparty/ice/ice_hw_autogen.h b/drivers/thirdparty/ice/ice_hw_autogen.h index a42c1af118a8..9ca0fb864706 100644 --- a/drivers/thirdparty/ice/ice_hw_autogen.h +++ b/drivers/thirdparty/ice/ice_hw_autogen.h @@ -1,12 +1,20 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2018-2021, Intel Corporation. */ -/* Machine-generated file; do not edit */ +/* Machine generated file. Do not edit. 
*/ + #ifndef _ICE_HW_AUTOGEN_H_ #define _ICE_HW_AUTOGEN_H_ - - +#define GL_HIDA(_i) (0x00082000 + ((_i) * 4)) +#define GL_HIBA(_i) (0x00081000 + ((_i) * 4)) +#define GL_HICR 0x00082040 +#define GL_HICR_EN 0x00082044 +#define GLGEN_CSR_DEBUG_C 0x00075750 +#define GLNVM_GENS 0x000B6100 +#define GLNVM_FLA 0x000B6108 +#define GL_HIDA_MAX_INDEX 15 +#define GL_HIBA_MAX_INDEX 1023 #define GL_RDPU_CNTRL 0x00052054 /* Reset Source: CORER */ #define GL_RDPU_CNTRL_RX_PAD_EN_S 0 #define GL_RDPU_CNTRL_RX_PAD_EN_M BIT(0) @@ -449,8 +457,8 @@ #define PF0INT_OICR_CPM_PAGE_RSV3_M BIT(23) #define PF0INT_OICR_CPM_PAGE_STORM_DETECT_S 24 #define PF0INT_OICR_CPM_PAGE_STORM_DETECT_M BIT(24) -#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_S 25 -#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_M BIT(25) +#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_S 25 +#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_M BIT(25) #define PF0INT_OICR_CPM_PAGE_HMC_ERR_S 26 #define PF0INT_OICR_CPM_PAGE_HMC_ERR_M BIT(26) #define PF0INT_OICR_CPM_PAGE_PE_PUSH_S 27 @@ -513,8 +521,8 @@ #define PF0INT_OICR_HLP_PAGE_RSV3_M BIT(23) #define PF0INT_OICR_HLP_PAGE_STORM_DETECT_S 24 #define PF0INT_OICR_HLP_PAGE_STORM_DETECT_M BIT(24) -#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_S 25 -#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_M BIT(25) +#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_S 25 +#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_M BIT(25) #define PF0INT_OICR_HLP_PAGE_HMC_ERR_S 26 #define PF0INT_OICR_HLP_PAGE_HMC_ERR_M BIT(26) #define PF0INT_OICR_HLP_PAGE_PE_PUSH_S 27 @@ -562,8 +570,8 @@ #define PF0INT_OICR_PSM_PAGE_RSV3_M BIT(23) #define PF0INT_OICR_PSM_PAGE_STORM_DETECT_S 24 #define PF0INT_OICR_PSM_PAGE_STORM_DETECT_M BIT(24) -#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_S 25 -#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_M BIT(25) +#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_S 25 +#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_M BIT(25) #define PF0INT_OICR_PSM_PAGE_HMC_ERR_S 26 #define PF0INT_OICR_PSM_PAGE_HMC_ERR_M BIT(26) #define PF0INT_OICR_PSM_PAGE_PE_PUSH_S 27 @@ -703,8 +711,8 @@ #define GL_ACL_PROFILE_BWSB_SEL_WSB_SRC_OFF_M ICE_M(0x1F, 8) #define GL_ACL_PROFILE_DWSB_SEL(_i) (0x00391088 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GL_ACL_PROFILE_DWSB_SEL_MAX_INDEX 15 -#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_S 0 -#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_M ICE_M(0xF, 0) +#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_S 0 +#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_M ICE_M(0xF, 0) #define GL_ACL_PROFILE_PF_CFG(_i) (0x003910C8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GL_ACL_PROFILE_PF_CFG_MAX_INDEX 7 #define GL_ACL_PROFILE_PF_CFG_SCEN_SEL_S 0 @@ -862,8 +870,8 @@ #define GLLAN_TCLAN_CACHE_CTL_FETCH_CL_ALIGN_M BIT(6) #define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_S 7 #define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_M ICE_M(0x7F, 7) -#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_S 14 -#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_M ICE_M(0xFF, 14) +#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_S 14 +#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_M ICE_M(0xFF, 14) #define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_S 22 #define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_M ICE_M(0x3FF, 22) #define GLTCLAN_CQ_CNTX0(_CQ) (0x000F0800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ @@ -2206,8 +2214,8 @@ #define PRTDCB_TX_DSCP2UP_CTL 0x00040980 /* Reset Source: CORER */ #define PRTDCB_TX_DSCP2UP_CTL_DSCP2UP_ENA_S 0 #define PRTDCB_TX_DSCP2UP_CTL_DSCP2UP_ENA_M BIT(0) -#define 
PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_S 1 -#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_M ICE_M(0x7, 1) +#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_S 1 +#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_M ICE_M(0x7, 1) #define PRTDCB_TX_DSCP2UP_IPV4_LUT(_i) (0x000409A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: CORER */ #define PRTDCB_TX_DSCP2UP_IPV4_LUT_MAX_INDEX 7 #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_0_S 0 @@ -2355,8 +2363,8 @@ #define TPB_PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_S 0 #define TPB_PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_M ICE_M(0x3FFFF, 0) #define TPB_PRTTCB_LL_DWRR_WB_CREDITS 0x00099320 /* Reset Source: CORER */ -#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_S 0 -#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_M ICE_M(0x3FFFF, 0) +#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_S 0 +#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_M ICE_M(0x3FFFF, 0) #define TPB_WB_RL_TC_CFG(_i) (0x00099360 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TPB_WB_RL_TC_CFG_MAX_INDEX 31 #define TPB_WB_RL_TC_CFG_TOKENS_S 0 @@ -2421,8 +2429,8 @@ #define GL_ACLEXT_FORCE_L1CDID_MAX_INDEX 2 #define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_S 0 #define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_M ICE_M(0xF, 0) -#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31 -#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31) +#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31 +#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31) #define GL_ACLEXT_FORCE_PID(_i) (0x00210000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_ACLEXT_FORCE_PID_MAX_INDEX 2 #define GL_ACLEXT_FORCE_PID_STATIC_PID_S 0 @@ -2615,8 +2623,8 @@ #define GL_PREEXT_FORCE_L1CDID_MAX_INDEX 2 #define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_S 0 #define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_M ICE_M(0xF, 0) -#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31 -#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31) +#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31 +#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31) #define GL_PREEXT_FORCE_PID(_i) (0x0020F000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_FORCE_PID_MAX_INDEX 2 #define GL_PREEXT_FORCE_PID_STATIC_PID_S 0 @@ -2817,8 +2825,8 @@ #define GL_PSTEXT_FORCE_L1CDID_MAX_INDEX 2 #define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_S 0 #define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_M ICE_M(0xF, 0) -#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31 -#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31) +#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31 +#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31) #define GL_PSTEXT_FORCE_PID(_i) (0x0020E000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_FORCE_PID_MAX_INDEX 2 #define GL_PSTEXT_FORCE_PID_STATIC_PID_S 0 @@ -2985,10 +2993,10 @@ #define GLFLXP_RX_CMD_LX_PROT_IDX_L4_OFFSET_INDEX_M ICE_M(0x7, 4) #define GLFLXP_RX_CMD_LX_PROT_IDX_PAYLOAD_OFFSET_INDEX_S 8 #define GLFLXP_RX_CMD_LX_PROT_IDX_PAYLOAD_OFFSET_INDEX_M ICE_M(0x7, 8) -#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_S 12 -#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_M ICE_M(0x3, 12) -#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_S 14 -#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_M ICE_M(0x3, 14) +#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_S 12 +#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_M ICE_M(0x3, 12) +#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_S 14 +#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_M ICE_M(0x3, 14) #define GLFLXP_RX_CMD_PROTIDS(_i, _j) (0x0045A000 + ((_i) * 4 + (_j) * 1024)) /* _i=0...255, _j=0...5 */ /* 
Reset Source: CORER */ #define GLFLXP_RX_CMD_PROTIDS_MAX_INDEX 255 #define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_S 0 @@ -3067,8 +3075,8 @@ #define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_M ICE_M(0xFF, 0) #define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_S 8 #define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_M ICE_M(0x1F, 8) -#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_S 16 -#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_M ICE_M(0xFF, 16) +#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_S 16 +#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_M ICE_M(0xFF, 16) #define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_1_S 24 #define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_1_M ICE_M(0x1F, 24) #define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ @@ -3281,18 +3289,18 @@ #define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDSTART_M ICE_M(0xF, 5) #define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_S 9 #define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_M ICE_M(0x1F, 9) -#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_S 14 -#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_M ICE_M(0x3, 14) +#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_S 14 +#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_M ICE_M(0x3, 14) #define GLGEN_ANA_PROFIL_CTRL_DEF_PROF_ID_S 16 #define GLGEN_ANA_PROFIL_CTRL_DEF_PROF_ID_M ICE_M(0xF, 16) -#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_S 20 -#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_M BIT(20) +#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_S 20 +#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_M BIT(20) #define GLGEN_ANA_TX_ABORT_PTYPE 0x0020D21C /* Reset Source: CORER */ #define GLGEN_ANA_TX_ABORT_PTYPE_ABORT_S 0 #define GLGEN_ANA_TX_ABORT_PTYPE_ABORT_M ICE_M(0x3FF, 0) #define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT 0x0020D208 /* Reset Source: CORER */ -#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_S 0 -#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_M ICE_M(0xFF, 0) +#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_S 0 +#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_M ICE_M(0xFF, 0) #define GLGEN_ANA_TX_CFG_CTRL 0x0020D104 /* Reset Source: CORER */ #define GLGEN_ANA_TX_CFG_CTRL_LINE_IDX_S 0 #define GLGEN_ANA_TX_CFG_CTRL_LINE_IDX_M ICE_M(0x3FFFF, 0) @@ -3318,10 +3326,10 @@ #define GLGEN_ANA_TX_CFG_RDDATA_RD_DATA_S 0 #define GLGEN_ANA_TX_CFG_RDDATA_RD_DATA_M ICE_M(0xFFFFFFFF, 0) #define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT 0x0020D15C /* Reset Source: CORER */ -#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_S 0 -#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_M BIT(0) -#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_S 1 -#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_M ICE_M(0x7, 1) +#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_S 0 +#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_M BIT(0) +#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_S 1 +#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_M ICE_M(0x7, 1) #define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_ADDR_S 4 #define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_ADDR_M ICE_M(0x1FF, 4) #define GLGEN_ANA_TX_CFG_WRDATA 0x0020D108 /* Reset Source: CORER */ @@ -3640,8 +3648,8 @@ #define GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_S 0 #define GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_M ICE_M(0xFFFFFFFF, 0) #define GLHMC_FWSDDATAHIGH_FPMAT 0x00102078 /* Reset Source: CORER */ -#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0 -#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_M ICE_M(0xFFFFFFFF, 0) +#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0 +#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_M ICE_M(0xFFFFFFFF, 0) #define GLHMC_FWSDDATALOW 0x00522074 /* Reset Source: CORER */ #define GLHMC_FWSDDATALOW_PMSDVALID_S 0 #define 
GLHMC_FWSDDATALOW_PMSDVALID_M BIT(0) @@ -4039,8 +4047,8 @@ #define GLHMC_VFPEMRCNT_FPMPEMRSZ_M ICE_M(0x1FFFFFFF, 0) #define GLHMC_VFPEOOISCBASE(_i) (0x0052E600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEOOISCBASE_MAX_INDEX 31 -#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_S 0 -#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_M ICE_M(0xFFFFFFFF, 0) +#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_S 0 +#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_M ICE_M(0xFFFFFFFF, 0) #define GLHMC_VFPEOOISCCNT(_i) (0x0052E700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEOOISCCNT_MAX_INDEX 31 #define GLHMC_VFPEOOISCCNT_GLHMC_PEOOISCCNT_S 0 @@ -4087,8 +4095,8 @@ #define GLHMC_VFPERRFCNT_GLHMC_PERRFCNT_M ICE_M(0xFFFFFFFF, 0) #define GLHMC_VFPERRFFLBASE(_i) (0x0052EA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPERRFFLBASE_MAX_INDEX 31 -#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_S 0 -#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_M ICE_M(0xFFFFFFFF, 0) +#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_S 0 +#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_M ICE_M(0xFFFFFFFF, 0) #define GLHMC_VFPETIMERBASE(_i) (0x0052DA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPETIMERBASE_MAX_INDEX 31 #define GLHMC_VFPETIMERBASE_FPMPETIMERBASE_S 0 @@ -4115,8 +4123,8 @@ #define GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_M ICE_M(0xFFFFFFFF, 0) #define GLHMC_VFSDDATAHIGH_FPMAT(_i) (0x00108200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFSDDATAHIGH_FPMAT_MAX_INDEX 31 -#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0 -#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_M ICE_M(0xFFFFFFFF, 0) +#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0 +#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_M ICE_M(0xFFFFFFFF, 0) #define GLHMC_VFSDDATALOW(_i) (0x00528100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFSDDATALOW_MAX_INDEX 31 #define GLHMC_VFSDDATALOW_PMSDVALID_S 0 @@ -4222,8 +4230,8 @@ #define PFHMC_ERRORINFO_FPMAT_PMF_ISVF_M BIT(7) #define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_S 8 #define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_M ICE_M(0xF, 8) -#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_S 16 -#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_M ICE_M(0x1F, 16) +#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_S 16 +#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_M ICE_M(0x1F, 16) #define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_S 31 #define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_M BIT(31) #define PFHMC_PDINV 0x00520300 /* Reset Source: PFR */ @@ -4310,8 +4318,8 @@ #define GL_MDCK_TDAT_TCLAN_TSO_SUM_BUFFS_LT_SUM_HDRS_M BIT(11) #define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_S 12 #define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_M BIT(12) -#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_S 13 -#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_M BIT(13) +#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_S 13 +#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_M BIT(13) #define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_S 14 #define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_M BIT(14) #define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_BYTES_EXCEED_PKTLEN_X_64_S 15 @@ -5206,10 +5214,10 @@ #define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E36C0 /* Reset Source: GLOBR */ #define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_S 0 #define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_M BIT(0) -#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */ +#define 
PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */ #define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_S 0 #define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_M ICE_M(0xFFFFFFFF, 0) -#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */ #define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_S 0 #define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_M ICE_M(0xFFFF, 0) #define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E3180 /* Reset Source: GLOBR */ @@ -5310,10 +5318,10 @@ #define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_EXCEED_M BIT(17) #define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_S 18 #define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_M BIT(18) -#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_S 19 -#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_M BIT(19) -#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_S 20 -#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_M BIT(20) +#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_S 19 +#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_M BIT(19) +#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_S 20 +#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_M BIT(20) #define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_S 21 #define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_M BIT(21) #define GL_MDCK_EN_TX_PQM_ILLEGAL_HEAD_DROP_DBL_S 22 @@ -5332,8 +5340,8 @@ #define GL_MDCK_TX_TDPU 0x00049348 /* Reset Source: CORER */ #define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_S 0 #define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_M BIT(0) -#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_S 1 -#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1) +#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_S 1 +#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1) #define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_S 2 #define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_M BIT(2) #define GL_MDCK_TX_TDPU_MAL_OFFSET_ITR_DIS_S 3 @@ -5346,8 +5354,8 @@ #define GL_MDCK_TX_TDPU_L2_ACCEPT_FAIL_ITR_DIS_M BIT(6) #define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_S 7 #define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_M BIT(7) -#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_S 8 -#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_M BIT(8) +#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_S 8 +#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_M BIT(8) #define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_S 9 #define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_M BIT(9) #define GL_MDCK_TX_TDPU_NIC_IPSEC_ITR_DIS_S 10 @@ -5429,8 +5437,8 @@ #define VP_MDET_TX_TDPU_VALID_M BIT(0) #define GENERAL_MNG_FW_DBG_CSR(_i) (0x000B6180 + ((_i) * 4)) /* _i=0...9 */ /* Reset Source: POR */ #define GENERAL_MNG_FW_DBG_CSR_MAX_INDEX 9 -#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_S 0 -#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_M ICE_M(0xFFFFFFFF, 0) +#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_S 0 +#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_M ICE_M(0xFFFFFFFF, 0) #define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */ #define GL_FWRESETCNT_FWRESETCNT_S 0 #define GL_FWRESETCNT_FWRESETCNT_M ICE_M(0xFFFFFFFF, 0) @@ -5842,8 +5850,8 @@ #define GL_XLR_MARKER_TRIG_RCU_PRS 0x002001C0 /* Reset Source: CORER */ #define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_S 0 #define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_M ICE_M(0x3FF, 0) -#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_S 10 -#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_M ICE_M(0x3, 10) +#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_S 10 +#define 
GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_M ICE_M(0x3, 10) #define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_S 12 #define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_M ICE_M(0x7, 12) #define GL_XLR_MARKER_TRIG_RCU_PRS_PORT_NUM_S 16 @@ -6722,11 +6730,11 @@ #define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_S 0 #define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_M ICE_M(0xFFFFFFFF, 0) #define GLPES_TCPRXFOURHOLEHI 0x0055E03C /* Reset Source: CORER */ -#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_S 0 -#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_M ICE_M(0xFFFFFF, 0) +#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_S 0 +#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_M ICE_M(0xFFFFFF, 0) #define GLPES_TCPRXFOURHOLELO 0x0055E038 /* Reset Source: CORER */ -#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_S 0 -#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_M ICE_M(0xFFFFFFFF, 0) +#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_S 0 +#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_M ICE_M(0xFFFFFFFF, 0) #define GLPES_TCPRXONEHOLEHI 0x0055E024 /* Reset Source: CORER */ #define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_S 0 #define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_M ICE_M(0xFFFFFF, 0) @@ -8207,7 +8215,7 @@ #define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_S 0 #define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_M ICE_M(0xFFFFFFFF, 0) #define TPB_PRTTPB_STAT_TC_BYTES_SENT(_i) (0x00099094 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ -#define TPB_PRTTPB_STAT_TC_BYTES_SENT_MAX_INDEX 63 +#define TPB_PRTTPB_STAT_TC_BYTES_SENT_MAX_INDEX 63 #define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_S 0 #define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_M ICE_M(0xFFFFFFFF, 0) #define EMP_SWT_PRUNIND 0x00204020 /* Reset Source: CORER */ @@ -9449,5 +9457,5 @@ #define VFPE_WQEALLOC1_PEQPID_M ICE_M(0x3FFFF, 0) #define VFPE_WQEALLOC1_WQE_DESC_INDEX_S 20 #define VFPE_WQEALLOC1_WQE_DESC_INDEX_M ICE_M(0xFFF, 20) +#endif /* !_ICE_HW_AUTOGEN_H_ */ -#endif diff --git a/drivers/thirdparty/ice/ice_idc.c b/drivers/thirdparty/ice/ice_idc.c index 512bca2f98d0..1a4ce41d8ac5 100644 --- a/drivers/thirdparty/ice/ice_idc.c +++ b/drivers/thirdparty/ice/ice_idc.c @@ -7,362 +7,62 @@ #include "ice_fltr.h" #include "ice_dcb_lib.h" #include "ice_ptp.h" +#include "ice_ieps.h" -DEFINE_IDA(ice_peer_index_ida); +static DEFINE_IDA(ice_cdev_info_ida); - - -static struct mfd_cell ice_mfd_cells[] = ASSIGN_PEER_INFO; +static struct cdev_info_id ice_cdev_ids[] = ASSIGN_IIDC_INFO; /** - * ice_is_vsi_state_nominal - * @vsi: pointer to the VSI struct + * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct + * @cdev_info: pointer to iidc_core_dev_info struct * - * returns true if VSI state is nominal, false otherwise + * This function has to be called with a device_lock on the + * cdev_info->adev.dev to avoid race conditions for auxiliary + * driver unload, and the mutex pf->adev_mutex locked to avoid + * plug/unplug race conditions.. */ -static bool ice_is_vsi_state_nominal(struct ice_vsi *vsi) +struct iidc_auxiliary_drv +*ice_get_auxiliary_drv(struct iidc_core_dev_info *cdev_info) { - if (!vsi) - return false; - - if (test_bit(ICE_VSI_DOWN, vsi->state) || - test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) - return false; - - return true; -} - - -/** - * ice_peer_state_change - manage state machine for peer - * @peer_obj: pointer to peer's configuration - * @new_state: the state requested to transition into - * @locked: boolean to determine if call made with mutex held - * - * This function handles all state transitions for peer objects. 
- * - * The state machine is as follows: - * - * +<-----------------------+<-----------------------------+ - * |<-------+<----------+ + - * \/ + + + - * INIT --------------> PROBED --> OPENING CLOSED --> REMOVED - * + + - * OPENED --> CLOSING - * + + - * PREP_RST + - * + + - * PREPPED + - * +---------->+ - * - * NOTE: there is an error condition that can take a peer from OPENING - * to REMOVED. - */ -static void -ice_peer_state_change(struct ice_peer_obj_int *peer_obj, long new_state, - bool locked) -{ - struct device *dev; - - dev = bus_find_device_by_name(&platform_bus_type, NULL, - peer_obj->plat_name); - - if (!locked) - mutex_lock(&peer_obj->peer_obj_state_mutex); - - switch (new_state) { - case ICE_PEER_OBJ_STATE_INIT: - if (test_and_clear_bit(ICE_PEER_OBJ_STATE_REMOVED, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_INIT, peer_obj->state); - dev_dbg(dev, "state transition from _REMOVED to _INIT\n"); - } else { - set_bit(ICE_PEER_OBJ_STATE_INIT, peer_obj->state); - if (dev) - dev_dbg(dev, "state set to _INIT\n"); - } - break; - case ICE_PEER_OBJ_STATE_PROBED: - if (test_and_clear_bit(ICE_PEER_OBJ_STATE_INIT, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_PROBED, peer_obj->state); - dev_dbg(dev, "state transition from _INIT to _PROBED\n"); - } else if (test_and_clear_bit(ICE_PEER_OBJ_STATE_REMOVED, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_PROBED, peer_obj->state); - dev_dbg(dev, "state transition from _REMOVED to _PROBED\n"); - } else if (test_and_clear_bit(ICE_PEER_OBJ_STATE_OPENING, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_PROBED, peer_obj->state); - dev_dbg(dev, "state transition from _OPENING to _PROBED\n"); - } - break; - case ICE_PEER_OBJ_STATE_OPENING: - if (test_and_clear_bit(ICE_PEER_OBJ_STATE_PROBED, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_OPENING, peer_obj->state); - dev_dbg(dev, "state transition from _PROBED to _OPENING\n"); - } else if (test_and_clear_bit(ICE_PEER_OBJ_STATE_CLOSED, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_OPENING, peer_obj->state); - dev_dbg(dev, "state transition from _CLOSED to _OPENING\n"); - } - break; - case ICE_PEER_OBJ_STATE_OPENED: - if (test_and_clear_bit(ICE_PEER_OBJ_STATE_OPENING, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_OPENED, peer_obj->state); - dev_dbg(dev, "state transition from _OPENING to _OPENED\n"); - } - break; - case ICE_PEER_OBJ_STATE_PREP_RST: - if (test_and_clear_bit(ICE_PEER_OBJ_STATE_OPENED, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_PREP_RST, peer_obj->state); - dev_dbg(dev, "state transition from _OPENED to _PREP_RST\n"); - } - break; - case ICE_PEER_OBJ_STATE_PREPPED: - if (test_and_clear_bit(ICE_PEER_OBJ_STATE_PREP_RST, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_PREPPED, peer_obj->state); - dev_dbg(dev, "state transition _PREP_RST to _PREPPED\n"); - } - break; - case ICE_PEER_OBJ_STATE_CLOSING: - if (test_and_clear_bit(ICE_PEER_OBJ_STATE_OPENED, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_CLOSING, peer_obj->state); - dev_dbg(dev, "state transition from _OPENED to _CLOSING\n"); - } - if (test_and_clear_bit(ICE_PEER_OBJ_STATE_PREPPED, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_CLOSING, peer_obj->state); - dev_dbg(dev, "state transition _PREPPED to _CLOSING\n"); - } - /* NOTE - up to peer to handle this situation correctly */ - if (test_and_clear_bit(ICE_PEER_OBJ_STATE_PREP_RST, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_CLOSING, peer_obj->state); - dev_warn(dev, - "WARN: Peer state _PREP_RST to _CLOSING\n"); - } - break; - case 
ICE_PEER_OBJ_STATE_CLOSED: - if (test_and_clear_bit(ICE_PEER_OBJ_STATE_CLOSING, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_CLOSED, peer_obj->state); - dev_dbg(dev, "state transition from _CLOSING to _CLOSED\n"); - } - break; - case ICE_PEER_OBJ_STATE_REMOVED: - if (test_and_clear_bit(ICE_PEER_OBJ_STATE_OPENED, - peer_obj->state) || - test_and_clear_bit(ICE_PEER_OBJ_STATE_CLOSED, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_REMOVED, peer_obj->state); - dev_dbg(dev, "state from _OPENED/_CLOSED to _REMOVED\n"); - /* Clear registration for events when peer removed */ - bitmap_zero(peer_obj->events, ICE_PEER_OBJ_STATE_NBITS); - } - if (test_and_clear_bit(ICE_PEER_OBJ_STATE_OPENING, - peer_obj->state)) { - set_bit(ICE_PEER_OBJ_STATE_REMOVED, peer_obj->state); - dev_warn(dev, "Peer failed to open, set to _REMOVED"); - } - break; - default: - break; - } - - if (!locked) - mutex_unlock(&peer_obj->peer_obj_state_mutex); - - put_device(dev); -} - -/** - * ice_peer_close - close a peer object - * @peer_obj_int: peer object to close - * @data: pointer to opaque data - * - * This function will also set the state bit for the peer to CLOSED. This - * function is meant to be called from a ice_for_each_peer(). - */ -int ice_peer_close(struct ice_peer_obj_int *peer_obj_int, void *data) -{ - enum ice_close_reason reason = *(enum ice_close_reason *)(data); - struct ice_peer_obj *peer_obj; + struct auxiliary_device *adev; struct ice_pf *pf; - int i; - peer_obj = ice_get_peer_obj(peer_obj_int); - /* return 0 so ice_for_each_peer will continue closing other peers */ - if (!ice_validate_peer_obj(peer_obj)) - return 0; - pf = pci_get_drvdata(peer_obj->pdev); + if (!cdev_info) + return NULL; + pf = pci_get_drvdata(cdev_info->pdev); - if (test_bit(ICE_DOWN, pf->state) || - test_bit(ICE_SUSPENDED, pf->state) || - test_bit(ICE_NEEDS_RESTART, pf->state)) - return 0; + lockdep_assert_held(&pf->adev_mutex); - mutex_lock(&peer_obj_int->peer_obj_state_mutex); + adev = cdev_info->adev; + if (!adev || !adev->dev.driver) + return NULL; - /* no peer driver, already closed, closing or opening nothing to do */ - if (test_bit(ICE_PEER_OBJ_STATE_CLOSED, peer_obj_int->state) || - test_bit(ICE_PEER_OBJ_STATE_CLOSING, peer_obj_int->state) || - test_bit(ICE_PEER_OBJ_STATE_OPENING, peer_obj_int->state) || - test_bit(ICE_PEER_OBJ_STATE_PROBED, peer_obj_int->state) || - test_bit(ICE_PEER_OBJ_STATE_REMOVED, peer_obj_int->state)) - goto peer_close_out; - - /* Set the peer state to CLOSING */ - ice_peer_state_change(peer_obj_int, ICE_PEER_OBJ_STATE_CLOSING, true); - - for (i = 0; i < ICE_EVENT_NBITS; i++) - bitmap_zero(peer_obj_int->current_events[i].type, - ICE_EVENT_NBITS); - - if (peer_obj->peer_ops && peer_obj->peer_ops->close) - peer_obj->peer_ops->close(peer_obj, reason); - - /* Set the peer state to CLOSED */ - ice_peer_state_change(peer_obj_int, ICE_PEER_OBJ_STATE_CLOSED, true); - -peer_close_out: - mutex_unlock(&peer_obj_int->peer_obj_state_mutex); - - return 0; + return container_of(adev->dev.driver, struct iidc_auxiliary_drv, + adrv.driver); } /** - * ice_close_peer_for_reset - queue work to close peer for reset - * @peer_obj_int: pointer peer object internal struct - * @data: pointer to opaque data used for reset type - */ -int ice_close_peer_for_reset(struct ice_peer_obj_int *peer_obj_int, void *data) -{ - struct ice_peer_obj *peer_obj; - enum ice_reset_req reset; - - peer_obj = ice_get_peer_obj(peer_obj_int); - if (!ice_validate_peer_obj(peer_obj) || - (!test_bit(ICE_PEER_OBJ_STATE_OPENED, peer_obj_int->state) && 
- !test_bit(ICE_PEER_OBJ_STATE_PREPPED, peer_obj_int->state))) - return 0; - - reset = *(enum ice_reset_req *)data; - - switch (reset) { - case ICE_RESET_EMPR: - peer_obj_int->rst_type = ICE_REASON_EMPR_REQ; - break; - case ICE_RESET_GLOBR: - peer_obj_int->rst_type = ICE_REASON_GLOBR_REQ; - break; - case ICE_RESET_CORER: - peer_obj_int->rst_type = ICE_REASON_CORER_REQ; - break; - case ICE_RESET_PFR: - peer_obj_int->rst_type = ICE_REASON_PFR_REQ; - break; - default: - /* reset type is invalid */ - return 1; - } - queue_work(peer_obj_int->ice_peer_wq, &peer_obj_int->peer_close_task); - return 0; -} - -/** - * ice_check_peer_drv_for_events - check peer_drv for events to report - * @peer_obj: peer object to report to - */ -static void ice_check_peer_drv_for_events(struct ice_peer_obj *peer_obj) -{ - const struct ice_peer_ops *p_ops = peer_obj->peer_ops; - struct ice_peer_obj_int *peer_obj_int; - struct ice_peer_drv_int *peer_drv_int; - int i; - - peer_obj_int = peer_to_ice_obj_int(peer_obj); - if (!peer_obj_int) - return; - peer_drv_int = peer_obj_int->peer_drv_int; - - for_each_set_bit(i, peer_obj_int->events, ICE_EVENT_NBITS) { - struct ice_event *curr = &peer_drv_int->current_events[i]; - - if (!bitmap_empty(curr->type, ICE_EVENT_NBITS) && - p_ops->event_handler) - p_ops->event_handler(peer_obj, curr); - } -} - -/** - * ice_check_peer_for_events - check peer_objs for events new peer reg'd for - * @src_peer_int: peer to check for events - * @data: ptr to opaque data, to be used for the peer struct that opened - * - * This function is to be called when a peer object is opened. - * - * Since a new peer opening would have missed any events that would - * have happened before its opening, we need to walk the peers and see - * if any of them have events that the new peer cares about - * - * This function is meant to be called by a ice_for_each_peer. 
- */ -static int -ice_check_peer_for_events(struct ice_peer_obj_int *src_peer_int, void *data) -{ - struct ice_peer_obj *new_peer = (struct ice_peer_obj *)data; - const struct ice_peer_ops *p_ops = new_peer->peer_ops; - struct ice_peer_obj_int *new_peer_int; - struct ice_peer_obj *src_peer; - unsigned long i; - - src_peer = ice_get_peer_obj(src_peer_int); - if (!ice_validate_peer_obj(new_peer) || - !ice_validate_peer_obj(src_peer)) - return 0; - - new_peer_int = peer_to_ice_obj_int(new_peer); - - for_each_set_bit(i, new_peer_int->events, ICE_EVENT_NBITS) { - struct ice_event *curr = &src_peer_int->current_events[i]; - - if (!bitmap_empty(curr->type, ICE_EVENT_NBITS) && - new_peer->peer_obj_id != src_peer->peer_obj_id && - p_ops->event_handler) - p_ops->event_handler(new_peer, curr); - } - - return 0; -} - -/** - * ice_for_each_peer - iterate across and call function for each peer obj + * ice_for_each_aux - iterate across and call function for each aux driver * @pf: pointer to private board struct * @data: data to pass to function on each call * @fn: pointer to function to call for each peer */ int -ice_for_each_peer(struct ice_pf *pf, void *data, - int (*fn)(struct ice_peer_obj_int *, void *)) +ice_for_each_aux(struct ice_pf *pf, void *data, + int (*fn)(struct iidc_core_dev_info *, void *)) { unsigned int i; - if (!pf->peers) + if (!pf->cdev_infos) return 0; - for (i = 0; i < ARRAY_SIZE(ice_mfd_cells); i++) { - struct ice_peer_obj_int *peer_obj_int; - - peer_obj_int = pf->peers[i]; - if (peer_obj_int) { - int ret = fn(peer_obj_int, data); + for (i = 0; i < ARRAY_SIZE(ice_cdev_ids); i++) { + struct iidc_core_dev_info *cdev_info; + cdev_info = pf->cdev_infos[i]; + if (cdev_info) { + int ret = fn(cdev_info, data); if (ret) return ret; } @@ -372,219 +72,121 @@ ice_for_each_peer(struct ice_pf *pf, void *data, } /** - * ice_finish_init_peer_obj - complete peer object initialization - * @peer_obj_int: ptr to peer object internal struct - * @data: ptr to opaque data - * - * This function completes remaining initialization of peer objects + * ice_send_event_to_aux - send event to a specific aux driver + * @cdev_info: pointer to iidc_core_dev_info struct for this aux + * @data: opaque pointer used to pass event struct */ -int -ice_finish_init_peer_obj(struct ice_peer_obj_int *peer_obj_int, - void __always_unused *data) +static int +ice_send_event_to_aux(struct iidc_core_dev_info *cdev_info, void *data) { - struct ice_peer_obj *peer_obj; - struct ice_peer_drv *peer_drv; - struct device *dev; - struct ice_pf *pf; - int ret = 0; - - peer_obj = ice_get_peer_obj(peer_obj_int); - /* peer_obj will not always be populated at the time of this check */ - if (!ice_validate_peer_obj(peer_obj)) - return ret; - - peer_drv = peer_obj->peer_drv; - pf = pci_get_drvdata(peer_obj->pdev); - dev = ice_pf_to_dev(pf); - /* There will be several assessments of the peer_obj's state in this - * chunk of logic. 
We need to hold the peer_obj_int's state mutex - * for the entire part so that the flow progresses without another - * context changing things mid-flow - */ - mutex_lock(&peer_obj_int->peer_obj_state_mutex); - - if (!peer_obj->peer_ops) { - dev_err(dev, "peer_ops not defined in peer obj\n"); - goto init_unlock; - } - - if (!peer_obj->peer_ops->open) { - dev_err(dev, "peer_ops:open not defined in peer obj\n"); - goto init_unlock; - } - - if (!peer_obj->peer_ops->close) { - dev_err(dev, "peer_ops:close not defined in peer obj\n"); - goto init_unlock; - } - - /* Peer driver expected to set driver_id during registration */ - if (!peer_drv->driver_id) { - dev_err(dev, "Peer driver did not set driver_id\n"); - goto init_unlock; - } - - if ((test_bit(ICE_PEER_OBJ_STATE_CLOSED, peer_obj_int->state) || - test_bit(ICE_PEER_OBJ_STATE_PROBED, peer_obj_int->state)) && - ice_pf_state_is_nominal(pf)) { - /* If the RTNL is locked, we defer opening the peer - * until the next time this function is called by the - * service task. - */ - if (rtnl_is_locked()) - goto init_unlock; - ice_peer_state_change(peer_obj_int, ICE_PEER_OBJ_STATE_OPENING, - true); - ret = peer_obj->peer_ops->open(peer_obj); - if (ret == -EAGAIN) { - dev_err(dev, "Peer %d failed to open\n", - peer_obj->peer_obj_id); - ice_peer_state_change(peer_obj_int, - ICE_PEER_OBJ_STATE_PROBED, true); - goto init_unlock; - } else if (ret) { - ice_peer_state_change(peer_obj_int, - ICE_PEER_OBJ_STATE_REMOVED, true); - peer_obj->peer_ops = NULL; - goto init_unlock; - } - - ice_peer_state_change(peer_obj_int, ICE_PEER_OBJ_STATE_OPENED, - true); - ret = ice_for_each_peer(pf, peer_obj, - ice_check_peer_for_events); - ice_check_peer_drv_for_events(peer_obj); - } - - if (test_bit(ICE_PEER_OBJ_STATE_PREPPED, peer_obj_int->state)) { - enum ice_close_reason reason = ICE_REASON_CORER_REQ; - int i; - - ice_peer_state_change(peer_obj_int, ICE_PEER_OBJ_STATE_CLOSING, - true); - for (i = 0; i < ICE_EVENT_NBITS; i++) - bitmap_zero(peer_obj_int->current_events[i].type, - ICE_EVENT_NBITS); - - peer_obj->peer_ops->close(peer_obj, reason); - - ice_peer_state_change(peer_obj_int, ICE_PEER_OBJ_STATE_CLOSED, - true); - } - -init_unlock: - mutex_unlock(&peer_obj_int->peer_obj_state_mutex); - - return ret; -} - -/** - * ice_unreg_peer_obj - unregister specified peer object - * @peer_obj_int: ptr to peer object internal - * @data: ptr to opaque data - * - * This function invokes object unregistration, removes ID associated with - * the specified object. 
- */ -int ice_unreg_peer_obj(struct ice_peer_obj_int *peer_obj_int, void __always_unused *data) -{ - struct ice_peer_drv_int *peer_drv_int; - struct ice_peer_obj *peer_obj; - struct pci_dev *pdev; - struct device *dev; + struct iidc_event *event = (struct iidc_event *)data; + struct iidc_auxiliary_drv *iadrv; struct ice_pf *pf; - if (!peer_obj_int) - return 0; + if (WARN_ON_ONCE(!in_task())) + return -EINVAL; - peer_obj = ice_get_peer_obj(peer_obj_int); - pdev = peer_obj->pdev; - if (!pdev) - return 0; + if (!cdev_info) + return -EINVAL; + pf = pci_get_drvdata(cdev_info->pdev); - pf = pci_get_drvdata(pdev); if (!pf) + return -EINVAL; + mutex_lock(&pf->adev_mutex); + + if (!cdev_info->adev || !event) { + mutex_unlock(&pf->adev_mutex); return 0; - dev = ice_pf_to_dev(pf); - - mfd_remove_devices(&pdev->dev); - - peer_drv_int = peer_obj_int->peer_drv_int; - - if (peer_obj_int->ice_peer_wq) { - if (peer_obj_int->peer_prep_task.func) - cancel_work_sync(&peer_obj_int->peer_prep_task); - - if (peer_obj_int->peer_close_task.func) - cancel_work_sync(&peer_obj_int->peer_close_task); - destroy_workqueue(peer_obj_int->ice_peer_wq); } - devm_kfree(dev, peer_drv_int); - - devm_kfree(dev, peer_obj_int); + device_lock(&cdev_info->adev->dev); + iadrv = ice_get_auxiliary_drv(cdev_info); + if (iadrv && iadrv->event_handler) + iadrv->event_handler(cdev_info, event); + device_unlock(&cdev_info->adev->dev); + mutex_unlock(&pf->adev_mutex); return 0; } /** - * ice_unroll_peer - destroy peers and peer_wq in case of error - * @peer_obj_int: ptr to peer object internal struct + * ice_send_event_to_aux_no_lock - send event to aux dev without taking dev_lock + * @cdev: pointer to iidc_core_dev_info struct + * @data: opaque poiner used to pass event struct + */ +void ice_send_event_to_aux_no_lock(struct iidc_core_dev_info *cdev, void *data) +{ + struct iidc_event *event = (struct iidc_event *)data; + struct iidc_auxiliary_drv *iadrv; + + iadrv = ice_get_auxiliary_drv(cdev); + if (iadrv && iadrv->event_handler) + iadrv->event_handler(cdev, event); +} + +/** + * ice_send_event_to_auxs - send event to all auxiliary drivers + * @pf: pointer to PF struct + * @event: pointer to iidc_event to propagate + * + * event struct to be populated by caller + */ +void ice_send_event_to_auxs(struct ice_pf *pf, struct iidc_event *event) +{ + if (!event || !pf) + return; + + if (bitmap_weight(event->type, IIDC_EVENT_NBITS) != 1) { + dev_warn(ice_pf_to_dev(pf), "Event with not exactly one type bit set\n"); + return; + } + + ice_for_each_aux(pf, event, ice_send_event_to_aux); +} + +/** + * ice_unroll_cdev_info - destroy cdev_info resources + * @cdev_info: ptr to cdev_info struct * @data: ptr to opaque data * - * This function releases resources in the event of a failure in creating - * peer objects or their individual work_queues. Meant to be called from - * a ice_for_each_peer invocation + * This function releases resources for cdev_info objects. 
+ * Meant to be called from a ice_for_each_aux invocation */ -int ice_unroll_peer(struct ice_peer_obj_int *peer_obj_int, void __always_unused *data) +int ice_unroll_cdev_info(struct iidc_core_dev_info *cdev_info, + void __always_unused *data) { - struct ice_peer_obj *peer_obj; - struct ice_pf *pf; - - peer_obj = ice_get_peer_obj(peer_obj_int); - if (!peer_obj || !peer_obj->pdev) + if (!cdev_info) return 0; - pf = pci_get_drvdata(peer_obj->pdev); - if (!pf) - return 0; - - if (peer_obj_int->ice_peer_wq) - destroy_workqueue(peer_obj_int->ice_peer_wq); - - if (peer_obj_int->peer_drv_int) - devm_kfree(ice_pf_to_dev(pf), peer_obj_int->peer_drv_int); - - devm_kfree(ice_pf_to_dev(pf), peer_obj_int); + kfree(cdev_info); return 0; } #ifdef CONFIG_PM /** - * ice_peer_refresh_msix - load new values into ice_peer_obj structs + * ice_cdev_info_refresh_msix - load new values into iidc_core_dev_info structs * @pf: pointer to private board struct */ -void ice_peer_refresh_msix(struct ice_pf *pf) +void ice_cdev_info_refresh_msix(struct ice_pf *pf) { - struct ice_peer_obj *peer; + struct iidc_core_dev_info *cdev_info; unsigned int i; - if (!pf->peers) + if (!pf->cdev_infos) return; - for (i = 0; i < ARRAY_SIZE(ice_mfd_cells); i++) { - if (!pf->peers[i]) + for (i = 0; i < ARRAY_SIZE(ice_cdev_ids); i++) { + if (!pf->cdev_infos[i]) continue; - peer = ice_get_peer_obj(pf->peers[i]); - if (!peer) - continue; + cdev_info = pf->cdev_infos[i]; - switch (peer->peer_obj_id) { - case ICE_PEER_RDMA_ID: - peer->msix_count = pf->num_rdma_msix; - peer->msix_entries = &pf->msix_entries[pf->rdma_base_vector]; + switch (cdev_info->cdev_info_id) { + case IIDC_RDMA_ID: + cdev_info->msix_count = pf->num_rdma_msix; + cdev_info->msix_entries = + &pf->msix_entries[pf->rdma_base_vector]; break; default: break; @@ -594,526 +196,260 @@ void ice_peer_refresh_msix(struct ice_pf *pf) #endif /* CONFIG_PM */ /** - * ice_find_vsi - Find the VSI from VSI ID - * @pf: The PF pointer to search in - * @vsi_num: The VSI ID to search for - */ -static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num) -{ - int i; - - ice_for_each_vsi(pf, i) - if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num) - return pf->vsi[i]; - return NULL; -} - -/** - * ice_peer_alloc_rdma_qsets - Allocate Leaf Nodes for RDMA Qset - * @peer_obj: peer that is requesting the Leaf Nodes - * @res: Resources to be allocated - * @partial_acceptable: If partial allocation is acceptable to the peer + * ice_alloc_rdma_qsets - Allocate Leaf Nodes for RDMA Qset + * @cdev_info: aux driver that is requesting the Leaf Nodes + * @qset: Resource to be allocated * * This function allocates Leaf Nodes for given RDMA Qset resources * for the peer object. 
*/ static int -ice_peer_alloc_rdma_qsets(struct ice_peer_obj *peer_obj, struct ice_res *res, - int __always_unused partial_acceptable) +ice_alloc_rdma_qsets(struct iidc_core_dev_info *cdev_info, + struct iidc_rdma_qset_params *qset) { u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS]; - enum ice_status status; +#ifdef HAVE_NETDEV_UPPER_INFO + struct ice_lag *lag; +#endif /* HAVE_NETDEV_UPPER_INFO */ struct ice_vsi *vsi; struct device *dev; struct ice_pf *pf; - int i, ret = 0; - u32 *qset_teid; - u16 *qs_handle; + u32 qset_teid; + u16 qs_handle; + int i, status; - if (!ice_validate_peer_obj(peer_obj) || !res) + if (!cdev_info || !qset) return -EINVAL; - pf = pci_get_drvdata(peer_obj->pdev); + pf = pci_get_drvdata(cdev_info->pdev); dev = ice_pf_to_dev(pf); - if (!test_bit(ICE_FLAG_IWARP_ENA, pf->flags)) + if (!ice_chk_rdma_cap(pf)) return -EINVAL; - if (res->cnt_req > ICE_MAX_TXQ_PER_TXQG) - return -EINVAL; - - qset_teid = kcalloc(res->cnt_req, sizeof(*qset_teid), GFP_KERNEL); - if (!qset_teid) - return -ENOMEM; - - qs_handle = kcalloc(res->cnt_req, sizeof(*qs_handle), GFP_KERNEL); - if (!qs_handle) { - kfree(qset_teid); - return -ENOMEM; - } - ice_for_each_traffic_class(i) max_rdmaqs[i] = 0; - for (i = 0; i < res->cnt_req; i++) { - struct ice_rdma_qset_params *qset; + max_rdmaqs[qset->tc]++; + qs_handle = qset->qs_handle; - qset = &res->res[i].res.qsets; - if (qset->vsi_id != peer_obj->pf_vsi_num) { - dev_err(dev, "RDMA QSet invalid VSI requested\n"); - ret = -EINVAL; - goto out; - } - max_rdmaqs[qset->tc]++; - qs_handle[i] = qset->qs_handle; - } - - vsi = ice_find_vsi(pf, peer_obj->pf_vsi_num); + vsi = ice_find_vsi(pf, qset->vport_id); if (!vsi) { dev_err(dev, "RDMA QSet invalid VSI\n"); - ret = -EINVAL; - goto out; + return -EINVAL; } status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_rdmaqs); if (status) { dev_err(dev, "Failed VSI RDMA qset config\n"); - ret = -EINVAL; - goto out; + return status; } - for (i = 0; i < res->cnt_req; i++) { - struct ice_rdma_qset_params *qset; + status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc, + &qs_handle, 1, &qset_teid); + if (status) { + dev_err(dev, "Failed VSI RDMA qset enable\n"); + return status; + } + vsi->qset_handle[qset->tc] = qset->qs_handle; + qset->teid = qset_teid; - qset = &res->res[i].res.qsets; - status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, - qset->tc, &qs_handle[i], 1, - &qset_teid[i]); - if (status) { - dev_err(dev, "Failed VSI RDMA qset enable\n"); - ret = -EINVAL; - goto out; +#ifdef HAVE_NETDEV_UPPER_INFO + lag = pf->lag; + if (lag && lag->bonded) { + mutex_lock(&pf->lag_mutex); + lag->rdma_qset[qset->tc] = *qset; + + if (cdev_info->rdma_active_port != pf->hw.port_info->lport && + cdev_info->rdma_active_port != ICE_LAG_INVALID_PORT) { + struct net_device *tmp_nd; + + rcu_read_lock(); + for_each_netdev_rcu(&init_net, tmp_nd) { + struct ice_netdev_priv *tmp_ndp; + struct ice_lag *tmp_lag; + struct ice_vsi *tmp_vsi; + struct ice_hw *tmp_hw; + + if (!netif_is_ice(tmp_nd)) + continue; + + tmp_ndp = netdev_priv(tmp_nd); + tmp_vsi = tmp_ndp->vsi; + tmp_lag = tmp_vsi->back->lag; + + if (!tmp_lag->bonded || + tmp_lag->bond_id != lag->bond_id) + continue; + + tmp_hw = &tmp_vsi->back->hw; + + if (cdev_info->rdma_active_port == + tmp_hw->port_info->lport) + status = ice_lag_move_node_sync(&pf->hw, + tmp_hw, + tmp_vsi, + qset); + } + rcu_read_unlock(); } - vsi->qset_handle[qset->tc] = qset->qs_handle; - qset->teid = qset_teid[i]; + mutex_unlock(&pf->lag_mutex); } -out: - kfree(qset_teid); - 
kfree(qs_handle); - return ret; +#endif /* HAVE_NETDEV_UPPER_INFO */ + return status; } /** - * ice_peer_free_rdma_qsets - Free leaf nodes for RDMA Qset - * @peer_obj: peer that requested qsets to be freed - * @res: Resource to be freed + * ice_free_rdma_qsets - Free leaf nodes for RDMA Qset + * @cdev_info: aux driver that requested qsets to be freed + * @qset: Resource to be freed */ static int -ice_peer_free_rdma_qsets(struct ice_peer_obj *peer_obj, struct ice_res *res) +ice_free_rdma_qsets(struct iidc_core_dev_info *cdev_info, + struct iidc_rdma_qset_params *qset) { - enum ice_status status; - int count, i, ret = 0; struct ice_vsi *vsi; struct device *dev; struct ice_pf *pf; u16 vsi_id; - u32 *teid; - u16 *q_id; + int status; + u32 teid; + u16 q_id; - if (!ice_validate_peer_obj(peer_obj) || !res) + if (!cdev_info || !qset) return -EINVAL; - pf = pci_get_drvdata(peer_obj->pdev); + pf = pci_get_drvdata(cdev_info->pdev); dev = ice_pf_to_dev(pf); - count = res->res_allocated; - if (count > ICE_MAX_TXQ_PER_TXQG) - return -EINVAL; - - teid = kcalloc(count, sizeof(*teid), GFP_KERNEL); - if (!teid) - return -ENOMEM; - - q_id = kcalloc(count, sizeof(*q_id), GFP_KERNEL); - if (!q_id) { - kfree(teid); - return -ENOMEM; - } - - vsi_id = res->res[0].res.qsets.vsi_id; + vsi_id = qset->vport_id; vsi = ice_find_vsi(pf, vsi_id); if (!vsi) { dev_err(dev, "RDMA Invalid VSI\n"); - ret = -EINVAL; - goto rdma_free_out; + return -EINVAL; } - for (i = 0; i < count; i++) { - struct ice_rdma_qset_params *qset; + if (qset->vport_id != vsi_id) { + dev_err(dev, "RDMA Invalid VSI ID\n"); + return -EINVAL; + } + q_id = qset->qs_handle; + teid = qset->teid; - qset = &res->res[i].res.qsets; - if (qset->vsi_id != vsi_id) { - dev_err(dev, "RDMA Invalid VSI ID\n"); - ret = -EINVAL; - goto rdma_free_out; + vsi->qset_handle[qset->tc] = 0; + +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag && pf->lag->bonded) { + mutex_lock(&pf->lag_mutex); + + if (cdev_info->rdma_active_port != pf->hw.port_info->lport && + cdev_info->rdma_active_port != ICE_LAG_INVALID_PORT) { + struct net_device *tmp_nd; + + rcu_read_lock(); + for_each_netdev_rcu(&init_net, tmp_nd) { + struct ice_netdev_priv *tmp_ndp; + struct ice_lag *tmp_lag; + struct ice_vsi *tmp_vsi; + struct ice_hw *tmp_hw; + + if (!netif_is_ice(tmp_nd)) + continue; + + tmp_ndp = netdev_priv(tmp_nd); + tmp_vsi = tmp_ndp->vsi; + tmp_lag = tmp_vsi->back->lag; + tmp_hw = &tmp_vsi->back->hw; + + if (!tmp_lag->bonded || + tmp_lag->bond_id != pf->lag->bond_id) + continue; + + if (cdev_info->rdma_active_port == + tmp_hw->port_info->lport) + ice_lag_move_node_sync(tmp_hw, &pf->hw, + pf->vsi[0], + qset); + } + rcu_read_unlock(); } - q_id[i] = qset->qs_handle; - teid[i] = qset->teid; - - vsi->qset_handle[qset->tc] = 0; + mutex_unlock(&pf->lag_mutex); } - status = ice_dis_vsi_rdma_qset(vsi->port_info, count, teid, q_id); +#endif /* HAVE_NETDEV_UPPER_INFO */ + status = ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id); if (status) - ret = -EINVAL; + return -EINVAL; -rdma_free_out: - kfree(teid); - kfree(q_id); - - return ret; +#ifdef HAVE_NETDEV_UPPER_INFO + memset(&pf->lag->rdma_qset[qset->tc], 0, sizeof(*qset)); +#endif /* HAVE_NETDEV_UPPER_INFO */ + return 0; } /** - * ice_peer_alloc_res - Allocate requested resources for peer objects - * @peer_obj: peer that is requesting resources - * @res: Resources to be allocated - * @partial_acceptable: If partial allocation is acceptable to the peer - * - * This function allocates requested resources for the peer object. 
+ * ice_cdev_info_alloc_res - Allocate requested resources for aux driver + * @cdev_info: struct for aux driver that is requesting resources + * @qset: Resource to be allocated */ static int -ice_peer_alloc_res(struct ice_peer_obj *peer_obj, struct ice_res *res, - int partial_acceptable) +ice_cdev_info_alloc_res(struct iidc_core_dev_info *cdev_info, + struct iidc_rdma_qset_params *qset) { struct ice_pf *pf; - int ret; - if (!ice_validate_peer_obj(peer_obj) || !res) + if (!cdev_info || !qset) return -EINVAL; - pf = pci_get_drvdata(peer_obj->pdev); + pf = pci_get_drvdata(cdev_info->pdev); if (!ice_pf_state_is_nominal(pf)) return -EBUSY; - switch (res->res_type) { - case ICE_RDMA_QSETS_TXSCHED: - ret = ice_peer_alloc_rdma_qsets(peer_obj, res, - partial_acceptable); - break; - default: - ret = -EINVAL; - break; - } - - return ret; + return ice_alloc_rdma_qsets(cdev_info, qset); } /** - * ice_peer_free_res - Free given resources - * @peer_obj: peer that is requesting freeing of resources - * @res: Resources to be freed - * - * Free/Release resources allocated to given peer onjects. + * ice_cdev_info_free_res - Free resources associated with aux driver + * @cdev_info: struct for aux driver that is requesting freeing of resources + * @qset: Resource to be freed */ static int -ice_peer_free_res(struct ice_peer_obj *peer_obj, struct ice_res *res) +ice_cdev_info_free_res(struct iidc_core_dev_info *cdev_info, + struct iidc_rdma_qset_params *qset) { - int ret; - - if (!ice_validate_peer_obj(peer_obj) || !res) + if (!cdev_info || !qset) return -EINVAL; - switch (res->res_type) { - case ICE_RDMA_QSETS_TXSCHED: - ret = ice_peer_free_rdma_qsets(peer_obj, res); - break; - default: - ret = -EINVAL; - break; - } - - return ret; + return ice_free_rdma_qsets(cdev_info, qset); } /** - * ice_peer_reg_for_notif - register a peer to receive specific notifications - * @peer_obj: peer that is registering for event notifications - * @events: mask of event types peer is registering for - */ -static void -ice_peer_reg_for_notif(struct ice_peer_obj *peer_obj, struct ice_event *events) -{ - struct ice_peer_obj_int *peer_obj_int; - struct ice_pf *pf; - - if (!ice_validate_peer_obj(peer_obj) || !events) - return; - - peer_obj_int = peer_to_ice_obj_int(peer_obj); - pf = pci_get_drvdata(peer_obj->pdev); - - bitmap_or(peer_obj_int->events, peer_obj_int->events, events->type, - ICE_EVENT_NBITS); - - /* Check to see if any events happened previous to peer registering */ - ice_for_each_peer(pf, peer_obj, ice_check_peer_for_events); - ice_check_peer_drv_for_events(peer_obj); -} - -/** - * ice_peer_unreg_for_notif - unreg a peer from receiving certain notifications - * @peer_obj: peer that is unregistering from event notifications - * @events: mask of event types peer is unregistering for - */ -static void -ice_peer_unreg_for_notif(struct ice_peer_obj *peer_obj, - struct ice_event *events) -{ - struct ice_peer_obj_int *peer_obj_int; - - if (!ice_validate_peer_obj(peer_obj) || !events) - return; - - peer_obj_int = peer_to_ice_obj_int(peer_obj); - - bitmap_andnot(peer_obj_int->events, peer_obj_int->events, events->type, - ICE_EVENT_NBITS); -} - -/** - * ice_peer_check_for_reg - check to see if any peers are reg'd for event - * @peer_obj_int: ptr to peer object internal struct - * @data: ptr to opaque data, to be used for ice_event to report - * - * This function is to be called by ice_for_each_peer to handle an - * event reported by a peer or the ice driver. 
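/* [Illustrative aside, not part of this patch] With the old ice_res wrapper
 * gone, an auxiliary driver requests a single RDMA qset leaf node directly
 * through the iidc ops table. Field usage mirrors ice_alloc_rdma_qsets();
 * the function name and the tc/qs_handle values chosen here are hypothetical:
 */
static int example_request_qset(struct iidc_core_dev_info *cdev_info)
{
	struct iidc_rdma_qset_params qset = {};
	int err;

	qset.tc = 0;				/* traffic class for the leaf node */
	qset.vport_id = cdev_info->vport_id;	/* must resolve to a valid VSI */
	qset.qs_handle = 0;			/* qset handle owned by the RDMA driver */

	err = cdev_info->ops->alloc_res(cdev_info, &qset);
	if (err)
		return err;

	/* on success the core fills qset.teid with the scheduler node TEID;
	 * the same structure is later handed back via ops->free_res()
	 */
	return 0;
}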
- */ -int ice_peer_check_for_reg(struct ice_peer_obj_int *peer_obj_int, void *data) -{ - struct ice_event *event = (struct ice_event *)data; - DECLARE_BITMAP(comp_events, ICE_EVENT_NBITS); - struct ice_peer_obj *peer_obj; - bool check = true; - - peer_obj = ice_get_peer_obj(peer_obj_int); - - if (!ice_validate_peer_obj(peer_obj) || !data) - /* If invalid obj, in this case return 0 instead of error - * because caller ignores this return value - */ - return 0; - - if (event->reporter) - check = event->reporter->peer_obj_id != peer_obj->peer_obj_id; - - if (bitmap_and(comp_events, event->type, peer_obj_int->events, - ICE_EVENT_NBITS) && - (test_bit(ICE_PEER_OBJ_STATE_OPENED, peer_obj_int->state) || - test_bit(ICE_PEER_OBJ_STATE_PREP_RST, peer_obj_int->state) || - test_bit(ICE_PEER_OBJ_STATE_PREPPED, peer_obj_int->state)) && - check && - peer_obj->peer_ops->event_handler) - peer_obj->peer_ops->event_handler(peer_obj, event); - - return 0; -} - -/** - * ice_peer_report_state_change - accept report of a peer state change - * @peer_obj: peer that is sending notification about state change - * @event: ice_event holding info on what the state change is - * - * We also need to parse the list of peers to see if anyone is registered - * for notifications about this state change event, and if so, notify them. - */ -static void -ice_peer_report_state_change(struct ice_peer_obj *peer_obj, - struct ice_event *event) -{ - struct ice_peer_obj_int *peer_obj_int; - struct ice_peer_drv_int *peer_drv_int; - unsigned int e_type; - int drv_event = 0; - struct ice_pf *pf; - - if (!ice_validate_peer_obj(peer_obj) || !event) - return; - - pf = pci_get_drvdata(peer_obj->pdev); - peer_obj_int = peer_to_ice_obj_int(peer_obj); - peer_drv_int = peer_obj_int->peer_drv_int; - - e_type = find_first_bit(event->type, ICE_EVENT_NBITS); - if (!e_type) - return; - - switch (e_type) { - /* Check for peer_drv events */ - case ICE_EVENT_MBX_CHANGE: - drv_event = 1; - if (event->info.mbx_rdy) - set_bit(ICE_PEER_DRV_STATE_MBX_RDY, - peer_drv_int->state); - else - clear_bit(ICE_PEER_DRV_STATE_MBX_RDY, - peer_drv_int->state); - break; - - /* Check for peer_obj events */ - case ICE_EVENT_API_CHANGE: - if (event->info.api_rdy) { - set_bit(ICE_PEER_OBJ_STATE_API_RDY, - peer_obj_int->state); - } else { - clear_bit(ICE_PEER_OBJ_STATE_API_RDY, - peer_obj_int->state); - } - break; - - default: - return; - } - - /* store the event and state to notify any new peers opening */ - if (drv_event) - memcpy(&peer_drv_int->current_events[e_type], event, - sizeof(*event)); - else - memcpy(&peer_obj_int->current_events[e_type], event, - sizeof(*event)); - - ice_for_each_peer(pf, event, ice_peer_check_for_reg); -} - -/** - * ice_peer_unregister - request to unregister peer - * @peer_obj: peer object - * - * This function triggers close/remove on peer_obj allowing peer - * to unregister. 
- */ -static int ice_peer_unregister(struct ice_peer_obj *peer_obj) -{ - enum ice_close_reason reason = ICE_REASON_PEER_DRV_UNREG; - struct ice_peer_obj_int *peer_obj_int; - struct ice_pf *pf; - int ret; - - if (!ice_validate_peer_obj(peer_obj)) - return -EINVAL; - - pf = pci_get_drvdata(peer_obj->pdev); - if (ice_is_reset_in_progress(pf->state)) - return -EBUSY; - - peer_obj_int = peer_to_ice_obj_int(peer_obj); - - ret = ice_peer_close(peer_obj_int, &reason); - if (ret) - return ret; - - switch (peer_obj->peer_obj_id) { - case ICE_PEER_RDMA_ID: - pf->rdma_peer = NULL; - break; - default: - break; - } - - peer_obj->peer_ops = NULL; - - ice_peer_state_change(peer_obj_int, ICE_PEER_OBJ_STATE_REMOVED, false); - return 0; -} - -/** - * ice_peer_register - Called by peer to open communication with LAN - * @peer_obj: ptr to peer object - * - * registering peer is expected to populate the ice_peerdrv->name field - * before calling this function. - */ -static int ice_peer_register(struct ice_peer_obj *peer_obj) -{ - struct ice_peer_drv_int *peer_drv_int; - struct ice_peer_obj_int *peer_obj_int; - struct ice_peer_drv *peer_drv; - - - if (!peer_obj) { - pr_err("Failed to reg peer_obj: peer_obj ptr NULL\n"); - return -EINVAL; - } - - if (!peer_obj->pdev) { - pr_err("Failed to reg peer_obj: peer_obj pdev NULL\n"); - return -EINVAL; - } - - if (!peer_obj->peer_ops || !peer_obj->ops) { - pr_err("Failed to reg peer_obj: peer_obj peer_ops/ops NULL\n"); - return -EINVAL; - } - - peer_drv = peer_obj->peer_drv; - if (!peer_drv) { - pr_err("Failed to reg peer_obj: peer drv NULL\n"); - return -EINVAL; - } - - - if (peer_drv->ver.major != ICE_PEER_MAJOR_VER || - peer_drv->ver.minor != ICE_PEER_MINOR_VER) { - pr_err("failed to register due to version mismatch:\n"); - pr_err("expected major ver %d, caller specified major ver %d\n", - ICE_PEER_MAJOR_VER, peer_drv->ver.major); - pr_err("expected minor ver %d, caller specified minor ver %d\n", - ICE_PEER_MINOR_VER, peer_drv->ver.minor); - return -EINVAL; - } - - peer_obj_int = peer_to_ice_obj_int(peer_obj); - peer_drv_int = peer_obj_int->peer_drv_int; - if (!peer_drv_int) { - pr_err("Failed to match peer_drv_int to peer_obj\n"); - return -EINVAL; - } - - peer_drv_int->peer_drv = peer_drv; - - ice_peer_state_change(peer_obj_int, ICE_PEER_OBJ_STATE_PROBED, false); - - return 0; -} - - -/** - * ice_peer_request_reset - accept request from peer to perform a reset - * @peer_obj: peer object that is requesting a reset + * ice_cdev_info_request_reset - accept request from peer to perform a reset + * @cdev_info: struct for aux driver that is requesting a reset * @reset_type: type of reset the peer is requesting */ static int -ice_peer_request_reset(struct ice_peer_obj *peer_obj, enum ice_peer_reset_type reset_type) +ice_cdev_info_request_reset(struct iidc_core_dev_info *cdev_info, + enum iidc_reset_type reset_type) { enum ice_reset_req reset; struct ice_pf *pf; - if (!ice_validate_peer_obj(peer_obj)) + if (!cdev_info) return -EINVAL; - pf = pci_get_drvdata(peer_obj->pdev); + pf = pci_get_drvdata(cdev_info->pdev); switch (reset_type) { - case ICE_PEER_PFR: + case IIDC_PFR: reset = ICE_RESET_PFR; break; - case ICE_PEER_CORER: + case IIDC_CORER: reset = ICE_RESET_CORER; break; - case ICE_PEER_GLOBR: + case IIDC_GLOBR: reset = ICE_RESET_GLOBR; break; default: @@ -1125,53 +461,25 @@ ice_peer_request_reset(struct ice_peer_obj *peer_obj, enum ice_peer_reset_type r } /** - * ice_peer_is_vsi_ready - query if VSI in nominal state - * @peer_obj: pointer to ice_peer_obj struct - */ 
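/* [Illustrative aside, not part of this patch] ice_cdev_info_request_reset()
 * is exposed to auxiliary drivers as iidc_ops.request_reset, so a peer that
 * hits a fatal condition can ask the PF for a reset. Sketch only; the calling
 * context is hypothetical:
 *
 *	if (cdev_info->ops->request_reset(cdev_info, IIDC_PFR))
 *		dev_warn(dev, "PF reset request was rejected\n");
 *
 * IIDC_PFR/IIDC_CORER/IIDC_GLOBR translate to ICE_RESET_PFR/CORER/GLOBR in
 * the switch statement above.
 */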
-static int ice_peer_is_vsi_ready(struct ice_peer_obj *peer_obj) -{ - struct ice_netdev_priv *np; - struct ice_vsi *vsi; - - /* If the peer_obj or associated values are not valid, then return - * 0 as there is no ready port associated with the values passed in - * as parameters. - */ - - if (!peer_obj || !peer_obj->pdev || !pci_get_drvdata(peer_obj->pdev) || - !peer_to_ice_obj_int(peer_obj)) - return 0; - - if (!peer_obj->netdev) - return 0; - - np = netdev_priv(peer_obj->netdev); - vsi = np->vsi; - - return ice_is_vsi_state_nominal(vsi); -} - -/** - * ice_peer_update_vsi_filter - update main VSI filters for RDMA - * @peer_obj: pointer to RDMA peer object - * @filter: selection of filters to enable or disable + * ice_cdev_info_update_vsi_filter - update main VSI filters for RDMA + * @cdev_info: pointer to struct for aux device updating filters + * @vsi_id: vsi HW idx to update filter on * @enable: bool whether to enable or disable filters */ static int -ice_peer_update_vsi_filter(struct ice_peer_obj *peer_obj, - enum ice_rdma_filter __maybe_unused filter, - bool enable) +ice_cdev_info_update_vsi_filter(struct iidc_core_dev_info *cdev_info, + u16 vsi_id, bool enable) { struct ice_vsi *vsi; struct ice_pf *pf; int ret; - if (!ice_validate_peer_obj(peer_obj)) + if (!cdev_info) return -EINVAL; - pf = pci_get_drvdata(peer_obj->pdev); + pf = pci_get_drvdata(cdev_info->pdev); - vsi = ice_get_main_vsi(pf); + vsi = ice_find_vsi(pf, vsi_id); if (!vsi) return -EINVAL; @@ -1191,58 +499,67 @@ ice_peer_update_vsi_filter(struct ice_peer_obj *peer_obj, } /** - * ice_peer_vc_send - send a virt channel message from a peer - * @peer_obj: pointer to a peer object - * @vf_id: the absolute VF ID of recipient of message + * ice_cdev_info_vc_send - send a virt channel message from an aux driver + * @cdev_info: pointer to cdev_info struct for aux driver + * @vf_id: the VF ID of recipient of message * @msg: pointer to message contents * @len: len of message + * + * Note that the VF ID is absolute for RDMA operations, but a relative ID for + * IPSEC operations. 
*/ static int -ice_peer_vc_send(struct ice_peer_obj *peer_obj, u32 vf_id, u8 *msg, u16 len) +ice_cdev_info_vc_send(struct iidc_core_dev_info *cdev_info, u32 vf_id, + u8 *msg, u16 len) { - enum ice_status status; struct ice_pf *pf; + u32 rel_vf_id; + int status; - if (!ice_validate_peer_obj(peer_obj)) + if (!cdev_info) return -EINVAL; if (!msg || !len) return -ENOMEM; - pf = pci_get_drvdata(peer_obj->pdev); + pf = pci_get_drvdata(cdev_info->pdev); if (len > ICE_AQ_MAX_BUF_LEN) return -EINVAL; if (ice_is_reset_in_progress(pf->state)) return -EBUSY; - switch (peer_obj->peer_drv->driver_id) { - case ICE_PEER_RDMA_DRIVER: - if (vf_id >= pf->num_alloc_vfs) + switch (cdev_info->cdev_info_id) { + case IIDC_RDMA_ID: + /* The ID is absolute so it must be converted first */ + rel_vf_id = ice_rel_vf_id(&pf->hw, vf_id); + + if (!ice_is_valid_vf_id(pf, rel_vf_id)) return -ENODEV; /* VIRTCHNL_OP_RDMA is being used for RoCEv2 msg also */ - status = ice_aq_send_msg_to_vf(&pf->hw, vf_id, VIRTCHNL_OP_RDMA, - 0, msg, len, NULL); + status = ice_aq_send_msg_to_vf(&pf->hw, rel_vf_id, + VIRTCHNL_OP_RDMA, 0, msg, len, + NULL); break; default: - dev_err(ice_pf_to_dev(pf), - "Peer driver (%u) not supported!", (u32)peer_obj->peer_drv->driver_id); + dev_err(ice_pf_to_dev(pf), "Can't send message to VF, Aux not supported, %u\n", + (u32)cdev_info->cdev_info_id); return -ENODEV; } if (status) - dev_err(ice_pf_to_dev(pf), "Unable to send msg to VF, error %s\n", - ice_stat_str(status)); - return ice_status_to_errno(status); + dev_err(ice_pf_to_dev(pf), "Unable to send msg to VF, error %d\n", + status); + return status; } /** - * ice_reserve_peer_qvector - Reserve vector resources for peer drivers + * ice_reserve_cdev_info_qvector - Reserve vector resources for aux drivers * @pf: board private structure to initialize */ -static int ice_reserve_peer_qvector(struct ice_pf *pf) +static int ice_reserve_cdev_info_qvector(struct ice_pf *pf) { - if (test_bit(ICE_FLAG_IWARP_ENA, pf->flags)) { + if (ice_chk_rdma_cap(pf)) { int index; index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix, ICE_RES_RDMA_VEC_ID); @@ -1255,234 +572,393 @@ static int ice_reserve_peer_qvector(struct ice_pf *pf) } /** - * ice_peer_close_task - call peer's close asynchronously - * @work: pointer to work_struct contained by the peer_obj_int struct + * ice_send_vf_reset_to_aux - send a VF reset notification to the aux driver + * @cdev_info: pointer to the cdev_info object + * @vf_id: VF ID to query * - * This method (asynchronous) of calling a peer's close function is - * meant to be used in the reset path. + * Tell the RDMA auxiliary driver that a VF is resetting */ -static void ice_peer_close_task(struct work_struct *work) +void ice_send_vf_reset_to_aux(struct iidc_core_dev_info *cdev_info, u16 vf_id) { - struct ice_peer_obj_int *peer_obj_int; - struct ice_peer_obj *peer_obj; + struct iidc_event *event; - peer_obj_int = container_of(work, struct ice_peer_obj_int, peer_close_task); - - peer_obj = ice_get_peer_obj(peer_obj_int); - if (!peer_obj || !peer_obj->peer_ops) + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) return; - - /* If this peer_obj is going to close, we do not want any state changes - * to happen until after we successfully finish or abort the close. - * Grab the peer_obj_state_mutex to protect this flow - */ - mutex_lock(&peer_obj_int->peer_obj_state_mutex); - - /* Only allow a close to go to the peer if they are in a state - * to accept it. 
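/* [Illustrative aside, not part of this patch] iidc_ops.vc_send takes the
 * absolute VF ID; ice_cdev_info_vc_send() converts it with ice_rel_vf_id()
 * before calling ice_aq_send_msg_to_vf(). From the RDMA auxiliary driver the
 * call is simply:
 *
 *	err = cdev_info->ops->vc_send(cdev_info, abs_vf_id, msg, len);
 *
 * where msg/len must fit within ICE_AQ_MAX_BUF_LEN and abs_vf_id is a
 * hypothetical absolute VF ID held by the caller.
 */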
The last state of PREP_RST is a special case - * that will not normally happen, but it is up to the peer - * to handle it correctly. - */ - if (test_bit(ICE_PEER_OBJ_STATE_OPENED, peer_obj_int->state) || - test_bit(ICE_PEER_OBJ_STATE_PREPPED, peer_obj_int->state) || - test_bit(ICE_PEER_OBJ_STATE_PREP_RST, peer_obj_int->state)) { - - ice_peer_state_change(peer_obj_int, ICE_PEER_OBJ_STATE_CLOSING, true); - - if (peer_obj->peer_ops->close) - peer_obj->peer_ops->close(peer_obj, peer_obj_int->rst_type); - - ice_peer_state_change(peer_obj_int, ICE_PEER_OBJ_STATE_CLOSED, true); - } - - mutex_unlock(&peer_obj_int->peer_obj_state_mutex); + set_bit(IIDC_EVENT_VF_RESET, event->type); + event->info.vf_id = (u32)vf_id; + ice_send_event_to_aux(cdev_info, event); + kfree(event); } /** - * ice_peer_update_vsi - update the pf_vsi info in peer_obj struct - * @peer_obj_int: pointer to peer_obj internal struct - * @data: opaque pointer - VSI to be updated + * ice_cdev_info_get_vf_port_info - get a VF's information + * @cdev_info: pointer to the cdev_info object + * @abs_vf_id: Absolute VF ID to query + * @vf_port_info: structure to populate for the caller + * + * Allow the RDMA auxiliary driver to query a VF's information */ -int ice_peer_update_vsi(struct ice_peer_obj_int *peer_obj_int, void *data) +static int +ice_cdev_info_get_vf_port_info(struct iidc_core_dev_info *cdev_info, + u16 abs_vf_id, + struct iidc_vf_port_info *vf_port_info) { - struct ice_vsi *vsi = (struct ice_vsi *)data; - struct ice_peer_obj *peer_obj; + struct ice_pf *pf; - peer_obj = ice_get_peer_obj(peer_obj_int); - if (!peer_obj) - return 0; + if (!cdev_info || !vf_port_info) + return -EINVAL; - peer_obj->pf_vsi_num = vsi->vsi_num; - return 0; + pf = pci_get_drvdata(cdev_info->pdev); + + return ice_get_vf_port_info(pf, ice_rel_vf_id(&pf->hw, abs_vf_id), + vf_port_info); } -/* Initialize the ice_ops struct, which is used in 'ice_init_peer_devices' */ -static const struct ice_ops ops = { - .alloc_res = ice_peer_alloc_res, - .free_res = ice_peer_free_res, - .is_vsi_ready = ice_peer_is_vsi_ready, - .reg_for_notification = ice_peer_reg_for_notif, - .unreg_for_notification = ice_peer_unreg_for_notif, - .notify_state_change = ice_peer_report_state_change, - .request_reset = ice_peer_request_reset, - .peer_register = ice_peer_register, - .peer_unregister = ice_peer_unregister, - .update_vsi_filter = ice_peer_update_vsi_filter, - .vc_send = ice_peer_vc_send, +/** + * ice_find_cdev_info_by_id - find cdev_info instance by its id + * @pf: pointer to private board struct + * @cdev_info_id: peer driver ID + */ +struct iidc_core_dev_info +*ice_find_cdev_info_by_id(struct ice_pf *pf, int cdev_info_id) +{ + struct iidc_core_dev_info *cdev_info = NULL; + unsigned int i; + + if (!pf->cdev_infos) + return NULL; + + for (i = 0; i < ARRAY_SIZE(ice_cdev_ids); i++) { + cdev_info = pf->cdev_infos[i]; + if (cdev_info && + cdev_info->cdev_info_id == cdev_info_id) + break; + cdev_info = NULL; + } + return cdev_info; +} + +/** + * ice_cdev_info_update_vsi - update the pf_vsi info in cdev_info struct + * @cdev_info: pointer to cdev_info struct + * @vsi: VSI to be updated + */ +void ice_cdev_info_update_vsi(struct iidc_core_dev_info *cdev_info, + struct ice_vsi *vsi) +{ + if (!cdev_info) + return; + + cdev_info->vport_id = vsi->vsi_num; +} + +/* Initialize the ice_ops struct, which is used in 'ice_init_aux_devices' */ +static const struct iidc_core_ops iidc_ops = { + .alloc_res = ice_cdev_info_alloc_res, + .free_res = ice_cdev_info_free_res, + .request_reset = 
ice_cdev_info_request_reset, + .update_vport_filter = ice_cdev_info_update_vsi_filter, + .get_vf_info = ice_cdev_info_get_vf_port_info, + .vc_send = ice_cdev_info_vc_send, + .ieps_entry = ice_ieps_entry, }; /** - * ice_init_peer_devices - initializes peer objects and aux devices - * @pf: ptr to ice_pf - * - * This function initializes peer objects and auxiliary device, then - * associates them with specified pci_dev as their parent. + * ice_cdev_info_adev_release - function to be mapped to aux dev's release op + * @dev: pointer to device to free */ -int ice_init_peer_devices(struct ice_pf *pf) +static void ice_cdev_info_adev_release(struct device *dev) +{ + struct iidc_auxiliary_dev *iadev; + + iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev); + kfree(iadev); +} + +/* ice_plug_aux_dev - allocate and register aux dev for cdev_info + * @cdev_info: pointer to cdev_info struct + * @name: name of peer_aux_dev + * + * The cdev_info must be setup before calling this function + */ +int ice_plug_aux_dev(struct iidc_core_dev_info *cdev_info, const char *name) +{ + struct iidc_auxiliary_dev *iadev; + struct auxiliary_device *adev; + struct ice_pf *pf; + int ret = 0; + + if (!cdev_info || !name) + return -EINVAL; + + pf = pci_get_drvdata(cdev_info->pdev); + if (!pf) + return -EINVAL; + + /* if this PF does not support a technology that requires auxiliary + * devices, then exit gracefully + */ + if (!ice_is_aux_ena(pf)) + return ret; + mutex_lock(&pf->adev_mutex); + if (cdev_info->adev) + goto aux_plug_out; + + if (cdev_info->cdev_info_id == IIDC_RDMA_ID && !ice_chk_rdma_cap(pf)) + goto aux_plug_out; + + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); + if (!iadev) { + ret = -ENOMEM; + goto aux_plug_out; + } + + adev = &iadev->adev; + cdev_info->adev = adev; + iadev->cdev_info = cdev_info; + + adev->id = pf->aux_idx; + adev->dev.release = ice_cdev_info_adev_release; + adev->dev.parent = &cdev_info->pdev->dev; + adev->name = name; + + ret = auxiliary_device_init(adev); + if (ret) { + cdev_info->adev = NULL; + kfree(iadev); + goto aux_plug_out; + } + + ret = auxiliary_device_add(adev); + if (ret) { + cdev_info->adev = NULL; + auxiliary_device_uninit(adev); + } + +aux_plug_out: + mutex_unlock(&pf->adev_mutex); + return ret; +} + +/* ice_unplug_aux_dev - unregister and free aux dev + * @cdev_info: pointer cdev_info struct + */ +void ice_unplug_aux_dev(struct iidc_core_dev_info *cdev_info) +{ + struct ice_pf *pf; + + if (!cdev_info) + return; + pf = pci_get_drvdata(cdev_info->pdev); + + /* if this aux dev has already been unplugged move on */ + mutex_lock(&pf->adev_mutex); + if (!cdev_info->adev) { + mutex_unlock(&pf->adev_mutex); + return; + } + + auxiliary_device_delete(cdev_info->adev); + auxiliary_device_uninit(cdev_info->adev); + cdev_info->adev = NULL; + mutex_unlock(&pf->adev_mutex); +} + +/* ice_plug_aux_devs - allocate and register aux dev for cdev_info + * @pf: pointer to pf struct + * + * The PFs cdev_infos array must be setup before calling this function + */ +int ice_plug_aux_devs(struct ice_pf *pf) +{ + int ret; + u8 i; + + for (i = 0; i < ARRAY_SIZE(ice_cdev_ids); i++) { + const char *name; + + if (!pf->cdev_infos[i]) + continue; + + if (pf->cdev_infos[i]->cdev_info_id == IIDC_RDMA_ID) { + if (pf->cdev_infos[i]->rdma_protocol == + IIDC_RDMA_PROTOCOL_IWARP) + name = IIDC_RDMA_IWARP_NAME; + else + name = IIDC_RDMA_ROCE_NAME; + } else { + name = ice_cdev_ids[i].name; + } + + ret = ice_plug_aux_dev(pf->cdev_infos[i], name); + if (ret) + return ret; + } + + return 0; +} + +/* 
ice_unplug_aux_devs - unregister and free aux devs + * @pf: pointer to pf struct + */ +void ice_unplug_aux_devs(struct ice_pf *pf) +{ + u8 i; + + for (i = 0; i < ARRAY_SIZE(ice_cdev_ids); i++) { + ice_unplug_aux_dev(pf->cdev_infos[i]); + } +} + +/** + * ice_cdev_init_rdma_qos_info - initialize qos_info for RDMA peer + * @pf: pointer to ice_pf + * @qos_info: pointer to qos_info struct + */ +static void ice_cdev_init_rdma_qos_info(struct ice_pf *pf, + struct iidc_qos_params *qos_info) +{ + int j; + + /* setup qos_info fields with defaults */ + qos_info->num_apps = 0; + qos_info->num_tc = 1; + + for (j = 0; j < IIDC_MAX_USER_PRIORITY; j++) + qos_info->up2tc[j] = 0; + + qos_info->tc_info[0].rel_bw = 100; + for (j = 1; j < IEEE_8021QAZ_MAX_TCS; j++) + qos_info->tc_info[j].rel_bw = 0; + /* for DCB, override the qos_info defaults. */ + ice_setup_dcb_qos_info(pf, qos_info); + +} + +/** + * ice_init_aux_devices - initializes cdev_info objects and aux devices + * @pf: ptr to ice_pf + */ +int ice_init_aux_devices(struct ice_pf *pf) { - struct ice_port_info *port_info = pf->hw.port_info; struct ice_vsi *vsi = pf->vsi[0]; struct pci_dev *pdev = pf->pdev; struct device *dev = &pdev->dev; - int status = 0; + int err; unsigned int i; /* Reserve vector resources */ - status = ice_reserve_peer_qvector(pf); - if (status < 0) { - dev_err(dev, "failed to reserve vectors for peer drivers\n"); - return status; + err = ice_reserve_cdev_info_qvector(pf); + if (err < 0) { + dev_err(dev, "failed to reserve vectors for aux drivers\n"); + return err; } - for (i = 0; i < ARRAY_SIZE(ice_mfd_cells); i++) { - struct ice_peer_obj_platform_data *platform_data; - struct ice_peer_obj_int *peer_obj_int; - struct ice_peer_drv_int *peer_drv_int; + + /* This PFs auxiliary id value */ + pf->aux_idx = ida_alloc(&ice_cdev_info_ida, GFP_KERNEL); + if (pf->aux_idx < 0) { + dev_err(dev, "failed to allocate device ID for aux drvs\n"); + return -ENOMEM; + } + + for (i = 0; i < ARRAY_SIZE(ice_cdev_ids); i++) { struct msix_entry *entry = NULL; - struct ice_qos_params *qos_info; - struct ice_peer_obj *peer_obj; - int j; + struct iidc_core_dev_info *cdev_info; - peer_obj_int = devm_kzalloc(dev, sizeof(*peer_obj_int), - GFP_KERNEL); - if (!peer_obj_int) + /* structure layout needed for container_of's looks like: + * iidc_auxiliary_dev (container_of super-struct for adev) + * |--> auxiliary_device + * |--> *iidc_core_dev_info (pointer from cdev_info struct) + * + * The iidc_auxiliary_device has a lifespan as long as it + * is on the bus. Once removed it will be freed and a new + * one allocated if needed to re-add. + * + * The iidc_core_dev_info is tied to the life of the PF, and + * will exist as long as the PF driver is loaded. It will be + * freed in the remove flow for the PF driver. 
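/* [Illustrative aside, not part of this patch] The long-lived
 * iidc_core_dev_info objects and the short-lived auxiliary devices give a
 * bring-up order roughly as below; whether the plug runs inline or from the
 * service task when it sees ICE_FLAG_PLUG_AUX_DEV is an assumption here:
 *
 *	err = ice_init_aux_devices(pf);      // allocate cdev_infos, reserve vectors
 *	if (!err && test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
 *		err = ice_plug_aux_devs(pf); // create and register auxiliary devices
 *
 * Teardown mirrors this with ice_unplug_aux_devs() followed by freeing each
 * cdev_info through ice_unroll_cdev_info().
 */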
+ */ + cdev_info = kzalloc(sizeof(*cdev_info), GFP_KERNEL); + if (!cdev_info) { + ida_simple_remove(&ice_cdev_info_ida, pf->aux_idx); + pf->aux_idx = -1; return -ENOMEM; - pf->peers[i] = peer_obj_int; - - peer_drv_int = devm_kzalloc(dev, sizeof(*peer_drv_int), - GFP_KERNEL); - if (!peer_drv_int) - return -ENOMEM; - - peer_obj_int->peer_drv_int = peer_drv_int; - - /* Initialize driver values */ - for (j = 0; j < ICE_EVENT_NBITS; j++) - bitmap_zero(peer_drv_int->current_events[j].type, - ICE_EVENT_NBITS); - - mutex_init(&peer_obj_int->peer_obj_state_mutex); - - peer_obj = ice_get_peer_obj(peer_obj_int); - peer_obj_int->plat_data.peer_obj = peer_obj; - platform_data = &peer_obj_int->plat_data; - peer_obj->peer_ops = NULL; - peer_obj->hw_addr = (u8 __iomem *)pf->hw.hw_addr; - peer_obj->ver.major = ICE_PEER_MAJOR_VER; - peer_obj->ver.minor = ICE_PEER_MINOR_VER; - peer_obj->ver.support = ICE_IDC_FEATURES; - peer_obj->peer_obj_id = ice_mfd_cells[i].id; - peer_obj->pf_vsi_num = vsi->vsi_num; - peer_obj->netdev = vsi->netdev; - peer_obj->initial_mtu = vsi->netdev->mtu; - ether_addr_copy(peer_obj->lan_addr, port_info->mac.lan_addr); - - ice_mfd_cells[i].platform_data = platform_data; - ice_mfd_cells[i].pdata_size = sizeof(*platform_data); - - peer_obj_int->ice_peer_wq = - alloc_ordered_workqueue("ice_peer_wq_%d", WQ_UNBOUND, - i); - if (!peer_obj_int->ice_peer_wq) - return -ENOMEM; - INIT_WORK(&peer_obj_int->peer_close_task, ice_peer_close_task); - - peer_obj->pdev = pdev; - peer_obj->ari_ena = pci_ari_enabled(pdev->bus); - peer_obj->bus_num = PCI_BUS_NUM(pdev->devfn); - if (!peer_obj->ari_ena) { - peer_obj->dev_num = PCI_SLOT(pdev->devfn); - peer_obj->fn_num = PCI_FUNC(pdev->devfn); - } else { - peer_obj->dev_num = 0; - peer_obj->fn_num = pdev->devfn & 0xff; } - qos_info = &peer_obj->initial_qos_info; + pf->cdev_infos[i] = cdev_info; - /* setup qos_info fields with defaults */ - qos_info->num_apps = 0; - qos_info->num_tc = 1; - - for (j = 0; j < ICE_IDC_MAX_USER_PRIORITY; j++) - qos_info->up2tc[j] = 0; - - qos_info->tc_info[0].rel_bw = 100; - for (j = 1; j < IEEE_8021QAZ_MAX_TCS; j++) - qos_info->tc_info[j].rel_bw = 0; - - /* for DCB, override the qos_info defaults. 
*/ - ice_setup_dcb_qos_info(pf, qos_info); + cdev_info->hw_addr = (u8 __iomem *)pf->hw.hw_addr; + cdev_info->ver.major = IIDC_MAJOR_VER; + cdev_info->ver.minor = IIDC_MINOR_VER; + cdev_info->cdev_info_id = ice_cdev_ids[i].id; + cdev_info->pdev = pdev; /* Initialize ice_ops */ - peer_obj->ops = &ops; - + cdev_info->ops = &iidc_ops; /* make sure peer specific resources such as msix_count and * msix_entries are initialized */ - switch (ice_mfd_cells[i].id) { - case ICE_PEER_RDMA_ID: - if (test_bit(ICE_FLAG_IWARP_ENA, pf->flags)) { - peer_obj->msix_count = pf->num_rdma_msix; - entry = &pf->msix_entries[pf->rdma_base_vector]; + switch (ice_cdev_ids[i].id) { + + case IIDC_RDMA_ID: + if (!ice_chk_rdma_cap(pf)) { + pf->cdev_infos[i] = NULL; + kfree(cdev_info); + continue; } - pf->rdma_peer = peer_obj; + cdev_info->vport_id = vsi->vsi_num; + cdev_info->netdev = vsi->netdev; + cdev_info->rdma_protocol = IIDC_RDMA_PROTOCOL_ROCEV2; + cdev_info->rdma_caps.gen = IIDC_RDMA_GEN_2; + cdev_info->ftype = IIDC_FUNCTION_TYPE_PF; + cdev_info->cdev_info_id = IIDC_RDMA_ID; + cdev_info->pf_id = pf->hw.pf_id; +#ifdef HAVE_NETDEV_UPPER_INFO + cdev_info->rdma_active_port = ICE_LAG_INVALID_PORT; + cdev_info->main_pf_port = pf->hw.port_info->lport; +#endif /* HAVE_NETDEV_UPPER_INFO */ + ice_cdev_init_rdma_qos_info(pf, &cdev_info->qos_info); + /* make sure peer specific resources such as msix_count + * and msix_entries are initialized + */ + cdev_info->msix_count = pf->num_rdma_msix; + entry = &pf->msix_entries[pf->rdma_base_vector]; break; default: break; } - - peer_obj->msix_entries = entry; - ice_peer_state_change(peer_obj_int, ICE_PEER_OBJ_STATE_INIT, - false); + cdev_info->msix_entries = entry; } + set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); - status = ida_simple_get(&ice_peer_index_ida, 0, 0, GFP_KERNEL); - if (status < 0) { - dev_err(&pdev->dev, "failed to get unique index for device\n"); - return status; - } - - pf->peer_idx = status; - - status = mfd_add_devices(dev, pf->peer_idx, ice_mfd_cells, - ARRAY_SIZE(ice_mfd_cells), NULL, 0, NULL); - if (status) { - dev_err(dev, "Failure adding MFD devs for peers: %d\n", status); - return status; - } - - for (i = 0; i < ARRAY_SIZE(ice_mfd_cells); i++) { - snprintf(pf->peers[i]->plat_name, ICE_MAX_PEER_NAME, "%s.%d", - ice_mfd_cells[i].name, - pf->peer_idx + ice_mfd_cells[i].id); - dev = bus_find_device_by_name(&platform_bus_type, NULL, - pf->peers[i]->plat_name); - if (dev) { - dev_dbg(dev, "Peer Created: %s %d\n", - pf->peers[i]->plat_name, pf->peer_idx); - put_device(dev); - } - } - - return status; + return err; } + +/** + * ice_is_rdma_aux_loaded - check if RDMA auxiliary driver is loaded + * @pf: ptr to ice_pf + */ +bool ice_is_rdma_aux_loaded(struct ice_pf *pf) +{ + struct iidc_core_dev_info *rcdi; + struct iidc_auxiliary_drv *iadrv; + bool loaded; + + rcdi = ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID); + if (!rcdi) + return false; + + mutex_lock(&pf->adev_mutex); + device_lock(&rcdi->adev->dev); + iadrv = ice_get_auxiliary_drv(rcdi); + loaded = iadrv ? true : false; + device_unlock(&rcdi->adev->dev); + mutex_unlock(&pf->adev_mutex); + + dev_dbg(ice_pf_to_dev(pf), "RDMA Auxiliary Driver status: %s\n", + loaded ? "loaded" : "not loaded"); + + return loaded; +} + diff --git a/drivers/thirdparty/ice/ice_idc.h b/drivers/thirdparty/ice/ice_idc.h deleted file mode 100644 index d2b74e0c539c..000000000000 --- a/drivers/thirdparty/ice/ice_idc.h +++ /dev/null @@ -1,422 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (C) 2018-2021, Intel Corporation. 
*/ - -#ifndef _ICE_IDC_H_ -#define _ICE_IDC_H_ - -#include -#include -#include -#include -#include - -#include - -/* This major and minor version represent IDC API version information. - * During peer driver registration, peer driver specifies major and minor - * version information (via. peer_driver:ver_info). It gets checked against - * following defines and if mismatch, then peer driver registration - * fails and appropriate message gets logged. - */ -#define ICE_PEER_MAJOR_VER 7 -#define ICE_PEER_MINOR_VER 1 - -enum ice_peer_features { - ICE_PEER_FEATURE_ADK_SUPPORT, - ICE_PEER_FEATURE_PTP_SUPPORT, - ICE_PEER_FEATURE_SRIOV_SUPPORT, - ICE_PEER_FEATURE_PCIIOV_SUPPORT, - ICE_PEER_FEATURE_NBITS -}; - -#define ICE_ADK_SUP 0 - -#define ICE_PTP_SUP BIT(ICE_PEER_FEATURE_PTP_SUPPORT) - -#define ICE_SRIOV_SUP BIT(ICE_PEER_FEATURE_SRIOV_SUPPORT) - -#ifdef CONFIG_PCI_IOV -#define ICE_PCIIOV_SUP BIT(ICE_PEER_FEATURE_PCIIOV_SUPPORT) -#else -#define ICE_PCIIOV_SUP 0 -#endif /* CONFIG_PCI_IOV */ - -#define ICE_IDC_FEATURES (ICE_ADK_SUP | ICE_PTP_SUP | ICE_SRIOV_SUP |\ - ICE_PCIIOV_SUP) - -enum ice_event_type { - ICE_EVENT_LINK_CHANGE = 0x0, - ICE_EVENT_MTU_CHANGE, - ICE_EVENT_TC_CHANGE, - ICE_EVENT_API_CHANGE, - ICE_EVENT_MBX_CHANGE, - ICE_EVENT_CRIT_ERR, - ICE_EVENT_NBITS /* must be last */ -}; - -enum ice_res_type { - ICE_INVAL_RES = 0x0, - ICE_VSI, - ICE_VEB, - ICE_EVENT_Q, - ICE_EGRESS_CMPL_Q, - ICE_CMPL_EVENT_Q, - ICE_ASYNC_EVENT_Q, - ICE_DOORBELL_Q, - ICE_RDMA_QSETS_TXSCHED, -}; - -enum ice_peer_reset_type { - ICE_PEER_PFR = 0, - ICE_PEER_CORER, - ICE_PEER_CORER_SW_CORE, - ICE_PEER_CORER_SW_FULL, - ICE_PEER_GLOBR, -}; - -/* reason notified to peer driver as part of event handling */ -enum ice_close_reason { - ICE_REASON_INVAL = 0x0, - ICE_REASON_HW_UNRESPONSIVE, - ICE_REASON_INTERFACE_DOWN, /* Administrative down */ - ICE_REASON_PEER_DRV_UNREG, /* peer driver getting unregistered */ - ICE_REASON_PEER_OBJ_UNINIT, - ICE_REASON_GLOBR_REQ, - ICE_REASON_CORER_REQ, - ICE_REASON_EMPR_REQ, - ICE_REASON_PFR_REQ, - ICE_REASON_HW_RESET_PENDING, - ICE_REASON_RECOVERY_MODE, - ICE_REASON_PARAM_CHANGE, -}; - -enum ice_rdma_filter { - ICE_RDMA_FILTER_INVAL = 0x0, - ICE_RDMA_FILTER_IWARP, - ICE_RDMA_FILTER_ROCEV2, - ICE_RDMA_FILTER_BOTH, -}; - -/* This information is needed to handle peer driver registration, - * instead of adding more params to peer_drv_registration function, - * let's get it thru' peer_drv object. - */ -struct ice_ver_info { - u16 major; - u16 minor; - u64 support; -}; - -/* Struct to hold per DCB APP info */ -struct ice_dcb_app_info { - u8 priority; - u8 selector; - u16 prot_id; -}; - -struct ice_peer_obj; -struct ice_peer_obj_int; - -#define ICE_IDC_MAX_USER_PRIORITY 8 -#define ICE_IDC_MAX_APPS 64 -#define ICE_IDC_DSCP_NUM_VAL 64 - -/* Source timer mode */ -enum ice_src_tmr_mode { - ICE_SRC_TMR_MODE_NANOSECONDS, - ICE_SRC_TMR_MODE_LOCKED, - - NUM_ICE_SRC_TMR_MODE -}; - - - -/* Struct to hold per RDMA Qset info */ -struct ice_rdma_qset_params { - u32 teid; /* qset TEID */ - u16 qs_handle; /* RDMA driver provides this */ - u16 vsi_id; /* VSI index */ - u8 tc; /* TC branch the QSet should belong to */ - u8 reserved[3]; -}; - -struct ice_res_base { - /* Union for future provision e.g. other res_type */ - union { - struct ice_rdma_qset_params qsets; - } res; -}; - -struct ice_res { - /* Type of resource. Filled by peer driver */ - enum ice_res_type res_type; - /* Count requested by peer driver */ - u16 cnt_req; - - - /* Number of resources allocated. Filled in by callee. 
- * Based on this value, caller to fill up "resources" - */ - u16 res_allocated; - - /* Unique handle to resources allocated. Zero if call fails. - * Allocated by callee and for now used by caller for internal - * tracking purpose. - */ - u32 res_handle; - - /* Peer driver has to allocate sufficient memory, to accommodate - * cnt_requested before calling this function. - * Memory has to be zero initialized. It is input/output param. - * As a result of alloc_res API, this structures will be populated. - */ - struct ice_res_base res[1]; -}; - -struct ice_qos_info { - u64 tc_ctx; - u8 rel_bw; - u8 prio_type; - u8 egress_virt_up; - u8 ingress_virt_up; -}; - -#define IDC_QOS_MODE_VLAN 0x0 -#define IDC_QOS_MODE_DSCP 0x1 - -/* Struct to hold QoS info */ -struct ice_qos_params { - struct ice_qos_info tc_info[IEEE_8021QAZ_MAX_TCS]; - u8 up2tc[ICE_IDC_MAX_USER_PRIORITY]; - u8 vsi_relative_bw; - u8 vsi_priority_type; - u32 num_apps; - u8 pfc_mode; - u8 dscp_map[ICE_IDC_DSCP_NUM_VAL]; - struct ice_dcb_app_info apps[ICE_IDC_MAX_APPS]; - u8 num_tc; -}; - -union ice_event_info { - /* ICE_EVENT_LINK_CHANGE */ - struct { - struct net_device *lwr_nd; - u16 vsi_num; /* HW index of VSI corresponding to lwr ndev */ - u8 new_link_state; - u8 lport; - } link_info; - /* ICE_EVENT_MTU_CHANGE */ - u16 mtu; - /* ICE_EVENT_TC_CHANGE */ - struct ice_qos_params port_qos; - /* ICE_EVENT_API_CHANGE */ - u8 api_rdy; - /* ICE_EVENT_MBX_CHANGE */ - u8 mbx_rdy; - /* ICE_EVENT_CRIT_ERR */ - u32 reg; -}; - -/* ice_event elements are to be passed back and forth between the ice driver - * and the peer drivers. They are to be used to both register/unregister - * for event reporting and to report an event (events can be either ice - * generated or peer generated). - * - * For (un)registering for events, the structure needs to be populated with: - * reporter - pointer to the ice_peer_obj struct of the peer (un)registering - * type - bitmap with bits set for event types to (un)register for - * - * For reporting events, the structure needs to be populated with: - * reporter - pointer to peer that generated the event (NULL for ice) - * type - bitmap with single bit set for this event type - * info - union containing data relevant to this event type - */ -struct ice_event { - struct ice_peer_obj *reporter; - DECLARE_BITMAP(type, ICE_EVENT_NBITS); - union ice_event_info info; -}; - -/* Following APIs are implemented by ICE driver and invoked by peer drivers */ -struct ice_ops { - /* APIs to allocate resources such as VEB, VSI, Doorbell queues, - * completion queues, Tx/Rx queues, etc... 
- */ - int (*alloc_res)(struct ice_peer_obj *peer_obj, - struct ice_res *res, - int partial_acceptable); - int (*free_res)(struct ice_peer_obj *peer_obj, - struct ice_res *res); - - int (*is_vsi_ready)(struct ice_peer_obj *peer_obj); - int (*peer_register)(struct ice_peer_obj *peer_obj); - int (*peer_unregister)(struct ice_peer_obj *peer_obj); - int (*request_reset)(struct ice_peer_obj *obj, - enum ice_peer_reset_type reset_type); - - void (*notify_state_change)(struct ice_peer_obj *obj, - struct ice_event *event); - - /* Notification APIs */ - void (*reg_for_notification)(struct ice_peer_obj *obj, - struct ice_event *event); - void (*unreg_for_notification)(struct ice_peer_obj *obj, - struct ice_event *event); - int (*update_vsi_filter)(struct ice_peer_obj *peer_obj, - enum ice_rdma_filter filter, bool enable); - int (*vc_send)(struct ice_peer_obj *peer_obj, u32 vf_id, u8 *msg, - u16 len); -}; - -/* Following APIs are implemented by peer drivers and invoked by ICE driver */ -struct ice_peer_ops { - void (*event_handler)(struct ice_peer_obj *peer_obj, - struct ice_event *event); - - /* Why we have 'open' and when it is expected to be called: - * 1. symmetric set of API w.r.t close - * 2. To be invoked form driver initialization path - * - call peer_driver:open once ice driver is fully initialized - * 3. To be invoked upon RESET complete - * - * Calls to open are performed from ice_finish_init_peer_obj - * which is invoked from the service task. This helps keep objects - * from having their open called until the ice driver is ready and - * has scheduled its service task. - */ - int (*open)(struct ice_peer_obj *peer_obj); - - /* Peer's close function is to be called when the peer needs to be - * quiesced. This can be for a variety of reasons (enumerated in the - * ice_close_reason enum struct). A call to close will only be - * followed by a call to either remove or open. No IDC calls from the - * peer should be accepted until it is re-opened. - * - * The *reason* parameter is the reason for the call to close. This - * can be for any reason enumerated in the ice_close_reason struct. - * It's primary reason is for the peer's bookkeeping and in case the - * peer want to perform any different tasks dictated by the reason. - */ - void (*close)(struct ice_peer_obj *peer_obj, - enum ice_close_reason reason); - - int (*vc_receive)(struct ice_peer_obj *peer_obj, u32 vf_id, u8 *msg, - u16 len); - /* tell RDMA peer to prepare for TC change in a blocking call - * that will directly precede the change event - */ - void (*prep_tc_change)(struct ice_peer_obj *peer_obj); -}; - -#define ICE_PEER_RDMA_NAME "ice_rdma" -#define ICE_PEER_RDMA_ID 0x00000010 -#define ICE_MAX_NUM_PEERS 4 - -/* The const struct that instantiates peer_obj_id needs to be initialized - * in the .c with the macro ASSIGN_PEER_INFO. 
- * For example: - * static const struct peer_obj_id peer_obj_ids[] = ASSIGN_PEER_INFO; - */ -struct peer_obj_id { - char *name; - int id; -}; - -#define IDC_RDMA_INFO { .name = ICE_PEER_RDMA_NAME, .id = ICE_PEER_RDMA_ID }, -#define IDC_AE_INFO -#define IDC_IPSEC_INFO -#define IDC_SWITCH_INFO -#define IDC_ADK_INFO -/* this is a list of all possible peers, some are unused but left for clarity */ -#define ASSIGN_PEER_INFO \ -{ \ - IDC_RDMA_INFO \ - IDC_AE_INFO \ - IDC_IPSEC_INFO \ - IDC_SWITCH_INFO \ - IDC_ADK_INFO \ -} - -#define ice_peer_priv(x) ((x)->peer_priv) - -/* structure representing peer_object */ -struct ice_peer_obj { - struct ice_ver_info ver; - struct pci_dev *pdev; /* PCI device of corresponding to main function */ - /* KVA / Linear address corresponding to BAR0 of underlying - * pci_device. - */ - u8 __iomem *hw_addr; - int peer_obj_id; - - int index; - - /* Opaque pointer for peer specific data tracking. This memory will - * be alloc'd and freed by the peer driver and used for private data - * accessible only to the specific peer. It is stored here so that - * when this struct is passed to the peer via an IDC call, the data - * can be accessed by the peer at that time. - * The peers should only retrieve the pointer by the macro: - * ice_peer_priv(struct ice_peer_obj *) - */ - void *peer_priv; - - - u8 ftype; /* PF(false) or VF (true) */ - - /* Data VSI created by driver */ - u16 pf_vsi_num; - - u8 lan_addr[ETH_ALEN]; /* default MAC address of main netdev */ - u16 initial_mtu; /* Initial MTU of main netdev */ - struct ice_qos_params initial_qos_info; - struct net_device *netdev; - /* PCI info */ - u8 ari_ena; - u16 bus_num; - u16 dev_num; - u16 fn_num; - - /* Based on peer driver type, this shall point to corresponding MSIx - * entries in pf->msix_entries (which were allocated as part of driver - * initialization) e.g. for RDMA driver, msix_entries reserved will be - * num_online_cpus + 1. 
- */ - u16 msix_count; /* How many vectors are reserved for this device */ - struct msix_entry *msix_entries; - - /* Following struct contains function pointers to be initialized - * by ICE driver and called by peer driver - */ - const struct ice_ops *ops; - - /* Following struct contains function pointers to be initialized - * by peer driver and called by ICE driver - */ - const struct ice_peer_ops *peer_ops; - - /* Pointer to peer_drv struct to be populated by peer driver */ - struct ice_peer_drv *peer_drv; -}; - -struct ice_peer_obj_platform_data { - struct ice_peer_obj *peer_obj; -}; - -/* structure representing peer driver - * Peer driver to initialize those function ptrs and - * it will be invoked by ICE as part of driver_registration - * via bus infrastructure - */ -struct ice_peer_drv { - u16 driver_id; -#define ICE_PEER_LAN_DRIVER 0 -#define ICE_PEER_RDMA_DRIVER 4 -#define ICE_PEER_ADK_DRIVER 5 - - struct ice_ver_info ver; - const char *name; - -}; - -#endif /* _ICE_IDC_H_*/ diff --git a/drivers/thirdparty/ice/ice_idc_int.h b/drivers/thirdparty/ice/ice_idc_int.h index 6939681f1498..72d5fcf0d700 100644 --- a/drivers/thirdparty/ice/ice_idc_int.h +++ b/drivers/thirdparty/ice/ice_idc_int.h @@ -4,167 +4,22 @@ #ifndef _ICE_IDC_INT_H_ #define _ICE_IDC_INT_H_ -#include "ice.h" -#include "ice_idc.h" +#include "iidc.h" +#define ICE_MAX_NUM_AUX 4 -enum ice_peer_obj_state { - ICE_PEER_OBJ_STATE_INIT, - ICE_PEER_OBJ_STATE_PROBED, - ICE_PEER_OBJ_STATE_OPENING, - ICE_PEER_OBJ_STATE_OPENED, - ICE_PEER_OBJ_STATE_PREP_RST, - ICE_PEER_OBJ_STATE_PREPPED, - ICE_PEER_OBJ_STATE_CLOSING, - ICE_PEER_OBJ_STATE_CLOSED, - ICE_PEER_OBJ_STATE_REMOVED, - ICE_PEER_OBJ_STATE_API_RDY, - ICE_PEER_OBJ_STATE_NBITS, /* must be last */ -}; +struct ice_pf; +void ice_send_event_to_auxs(struct ice_pf *pf, struct iidc_event *event); +struct iidc_auxiliary_drv +*ice_get_auxiliary_drv(struct iidc_core_dev_info *cdev_info); +void ice_send_event_to_aux_no_lock(struct iidc_core_dev_info *cdev, void *data); -enum ice_peer_drv_state { - ICE_PEER_DRV_STATE_MBX_RDY, - ICE_PEER_DRV_STATE_NBITS, /* must be last */ -}; - -struct ice_peer_drv_int { - struct ice_peer_drv *peer_drv; - - /* States associated with peer driver */ - DECLARE_BITMAP(state, ICE_PEER_DRV_STATE_NBITS); - - /* if this peer_obj is the originator of an event, these are the - * most recent events of each type - */ - struct ice_event current_events[ICE_EVENT_NBITS]; -}; - -#define ICE_MAX_PEER_NAME 64 - -struct ice_peer_obj_int { - struct ice_peer_obj peer_obj; - struct ice_peer_drv_int *peer_drv_int; /* driver private structure */ - char plat_name[ICE_MAX_PEER_NAME]; - struct ice_peer_obj_platform_data plat_data; - - /* if this peer_obj is the originator of an event, these are the - * most recent events of each type - */ - struct ice_event current_events[ICE_EVENT_NBITS]; - /* Events a peer has registered to be notified about */ - DECLARE_BITMAP(events, ICE_EVENT_NBITS); - - /* States associated with peer_obj */ - DECLARE_BITMAP(state, ICE_PEER_OBJ_STATE_NBITS); - struct mutex peer_obj_state_mutex; /* peer_obj state mutex */ - - /* per peer workqueue */ - struct workqueue_struct *ice_peer_wq; - - struct work_struct peer_prep_task; - struct work_struct peer_close_task; - - enum ice_close_reason rst_type; -}; - -static inline struct -ice_peer_obj_int *peer_to_ice_obj_int(struct ice_peer_obj *peer_obj) -{ - return peer_obj ? 
container_of(peer_obj, struct ice_peer_obj_int, - peer_obj) : NULL; -} - -static inline struct -ice_peer_obj *ice_get_peer_obj(struct ice_peer_obj_int *peer_obj_int) -{ - if (peer_obj_int) - return &peer_obj_int->peer_obj; - else - return NULL; -} - -#if IS_ENABLED(CONFIG_MFD_CORE) -int ice_peer_update_vsi(struct ice_peer_obj_int *peer_obj_int, void *data); -int ice_close_peer_for_reset(struct ice_peer_obj_int *peer_obj_int, void *data); -int ice_unroll_peer(struct ice_peer_obj_int *peer_obj_int, void *data); -int ice_unreg_peer_obj(struct ice_peer_obj_int *peer_obj_int, void *data); -int ice_peer_close(struct ice_peer_obj_int *peer_obj_int, void *data); -int ice_peer_check_for_reg(struct ice_peer_obj_int *peer_obj_int, void *data); -int -ice_finish_init_peer_obj(struct ice_peer_obj_int *peer_obj_int, void *data); -static inline bool ice_validate_peer_obj(struct ice_peer_obj *peer_obj) -{ - struct ice_peer_obj_int *peer_obj_int; - struct ice_pf *pf; - - if (!peer_obj || !peer_obj->pdev) - return false; - - if (!peer_obj->peer_ops) - return false; - - pf = pci_get_drvdata(peer_obj->pdev); - if (!pf) - return false; - - peer_obj_int = peer_to_ice_obj_int(peer_obj); - if (!peer_obj_int) - return false; - - if (test_bit(ICE_PEER_OBJ_STATE_REMOVED, peer_obj_int->state) || - test_bit(ICE_PEER_OBJ_STATE_INIT, peer_obj_int->state)) - return false; - - return true; -} -#else /* !CONFIG_MFD_CORE */ -static inline int -ice_peer_update_vsi(struct ice_peer_obj_int *peer_obj_int, void *data) -{ - return 0; -} - -static inline int -ice_close_peer_for_reset(struct ice_peer_obj_int *peer_obj_int, void *data) -{ - return 0; -} - -static inline int -ice_unroll_peer(struct ice_peer_obj_int *peer_obj_int, void *data) -{ - return 0; -} - -static inline int -ice_unreg_peer_obj(struct ice_peer_obj_int *peer_obj_int, void *data) -{ - return 0; -} - -static inline int -ice_peer_close(struct ice_peer_obj_int *peer_obj_int, void *data) -{ - return 0; -} - -static inline int -ice_peer_check_for_reg(struct ice_peer_obj_int *peer_obj_int, void *data) -{ - return 0; -} - -static inline int -ice_finish_init_peer_obj(struct ice_peer_obj_int *peer_obj_int, void *data) -{ - return 0; -} - -static inline bool ice_validate_peer_obj(struct ice_peer_obj *peer) -{ - return true; -} - -#endif /* !CONFIG_MFD_CORE */ +void ice_cdev_info_update_vsi(struct iidc_core_dev_info *cdev_info, + struct ice_vsi *vsi); +int ice_unroll_cdev_info(struct iidc_core_dev_info *cdev_info, void *data); +struct iidc_core_dev_info +*ice_find_cdev_info_by_id(struct ice_pf *pf, int cdev_info_id); +void ice_send_vf_reset_to_aux(struct iidc_core_dev_info *cdev_info, u16 vf_id); +bool ice_is_rdma_aux_loaded(struct ice_pf *pf); #endif /* !_ICE_IDC_INT_H_ */ diff --git a/drivers/thirdparty/ice/ice_ieps.c b/drivers/thirdparty/ice/ice_ieps.c new file mode 100644 index 000000000000..3674a0e87203 --- /dev/null +++ b/drivers/thirdparty/ice/ice_ieps.c @@ -0,0 +1,1032 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +/* Intel(R) Ethernet Connection E800 Series Linux Driver IEPS extensions */ + +#include "ice_ieps.h" +#include "ice.h" +#include "ice_lib.h" + +static struct ieps_peer_api_version ice_ieps_version = { + .major = IEPS_VERSION_PEER_MAJOR, + .minor = IEPS_VERSION_PEER_MINOR, +}; + +/** + * ice_ieps_i2c_fill - Fill I2C read_write AQ command descriptor + * @pf: ptr to pf + * @rw: ptr I2C read/write data structure + * @desc: ptr to AQ descriptor to fill + */ +static void +ice_ieps_i2c_fill(struct ice_pf *pf, struct ieps_peer_i2c *rw, + struct ice_aq_desc *desc) +{ + struct ice_aqc_i2c *cmd = &desc->params.read_write_i2c; + struct ice_aqc_link_topo_params *tparams; + struct ice_hw *hw = &pf->hw; + u8 params; + + params = (rw->data_len << ICE_AQC_I2C_DATA_SIZE_S); + if (unlikely(rw->en_10b_addr)) + params |= ICE_AQC_I2C_ADDR_TYPE_10BIT; + else + params |= ICE_AQC_I2C_ADDR_TYPE_7BIT; + + cmd->i2c_params = params; + cmd->i2c_bus_addr = cpu_to_le16(rw->dev_addr); + cmd->i2c_addr = cpu_to_le16(rw->reg_addr); + + tparams = &cmd->topo_addr.topo_params; + tparams->index = rw->bus; + tparams->lport_num = hw->port_info->lport; + tparams->lport_num_valid = ICE_AQC_LINK_TOPO_PORT_NUM_VALID; + tparams->node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE << + ICE_AQC_LINK_TOPO_NODE_CTX_S); +} + +/** + * ice_ieps_i2c_write - Request FW to perform CPK IO widget I2C write + * @pf: ptr to pf + * @rw: ptr to I2C read/write data structure + */ +static enum ieps_peer_status +ice_ieps_i2c_write(struct ice_pf *pf, struct ieps_peer_i2c *rw) +{ + struct ice_hw *hw = &pf->hw; + struct ice_aq_desc desc; + u8 i, remaining, wrlen; + + if (rw->data_len == 0) { + dev_dbg(ice_pf_to_dev(pf), "ERROR: i2c_wrlen=0\n"); + return IEPS_PEER_INVALID_ARG; + } + + dev_dbg(ice_pf_to_dev(pf), + "i2c_wr bus=%d dev=0x%x reg=0x%x len=%d data[0]=0x%x\n", + rw->bus, rw->dev_addr, rw->reg_addr, rw->data_len, rw->data[0]); + +#define ICE_IEPS_I2C_WR_SZ 4 + for (i = 0; i < rw->data_len; i += ICE_IEPS_I2C_WR_SZ) { + struct ice_aqc_i2c *i2c; + int status; + + remaining = rw->data_len - i; + if (remaining > ICE_IEPS_I2C_WR_SZ) + wrlen = ICE_IEPS_I2C_WR_SZ; + else + wrlen = remaining; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c); + ice_ieps_i2c_fill(pf, rw, &desc); + i2c = &desc.params.read_write_i2c; + + /* Copy write values */ + memcpy(i2c->i2c_data, &rw->data[i], wrlen); + i2c->i2c_params &= ~ICE_AQC_I2C_DATA_SIZE_M; + i2c->i2c_params |= (wrlen << ICE_AQC_I2C_DATA_SIZE_S); + + i2c->i2c_addr = cpu_to_le16(rw->reg_addr + i); + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (status) { + dev_dbg(ice_pf_to_dev(pf), "ERROR: i2c_wr status=%d\n", + status); + return IEPS_PEER_FW_ERROR; + } + } + + return IEPS_PEER_SUCCESS; +} + +/** + * ice_ieps_i2c_read - Request FW to perform CPK IO widget I2C read + * @pf: ptr to pf + * @rw: ptr to I2C read/write data structure + */ +static enum ieps_peer_status +ice_ieps_i2c_read(struct ice_pf *pf, struct ieps_peer_i2c *rw) +{ + struct ice_hw *hw = &pf->hw; + struct ice_aq_desc desc; + u8 i, remaining, rdlen; + + if (rw->data_len == 0) { + dev_dbg(ice_pf_to_dev(pf), "ERROR: i2c_rdlen=0\n"); + return IEPS_PEER_INVALID_ARG; + } +#define ICE_IEPS_I2C_RD_SZ 15 + for (i = 0; i < rw->data_len; i += ICE_IEPS_I2C_RD_SZ) { + struct ice_aqc_i2c *i2c; + int status; + + remaining = rw->data_len - i; + if (remaining > ICE_IEPS_I2C_RD_SZ) + rdlen = ICE_IEPS_I2C_RD_SZ; + else + rdlen = remaining; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c); + ice_ieps_i2c_fill(pf, rw, &desc); + 
i2c = &desc.params.read_write_i2c; + + i2c->i2c_params &= ~ICE_AQC_I2C_DATA_SIZE_M; + i2c->i2c_params |= (rdlen << ICE_AQC_I2C_DATA_SIZE_S); + + i2c->i2c_addr = cpu_to_le16(rw->reg_addr + i); + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (status) { + dev_dbg(ice_pf_to_dev(pf), "ERROR: i2c_rd status=%d\n", + status); + return IEPS_PEER_FW_ERROR; + } + + /* Copy back read values */ + memcpy(&rw->data[i], desc.params.read_i2c_resp.i2c_data, rdlen); + } + + dev_dbg(ice_pf_to_dev(pf), + "i2c_rd bus=%d dev=0x%x reg=0x%x len=%d data[0]=0x%x\n", + rw->bus, rw->dev_addr, rw->reg_addr, rw->data_len, rw->data[0]); + + return IEPS_PEER_SUCCESS; +} + +/** + * ice_ieps_mdio_fill_desc - Fill MDIO read_write AQ command descriptor + * @pf: ptr to pf + * @rw: ptr to MDIO read/write data structure + * @desc: ptr to AQ descriptor to fill MDIO data + */ +static enum ieps_peer_status +ice_ieps_mdio_fill_desc(struct ice_pf *pf, struct ieps_peer_mdio *rw, + struct ice_aq_desc *desc) +{ + struct ice_aqc_mdio *mdio_cmd = &desc->params.read_mdio; + struct ice_aqc_link_topo_params *tparams; + struct ice_hw *hw = &pf->hw; + + tparams = &mdio_cmd->topo_addr.topo_params; + tparams->index = rw->bus; + tparams->lport_num = hw->port_info->lport; + tparams->lport_num_valid = ICE_AQC_LINK_TOPO_PORT_NUM_VALID; + tparams->node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE << + ICE_AQC_LINK_TOPO_NODE_CTX_S); + + if (rw->bus & ~(ICE_AQC_MDIO_BUS_ADDR_M >> ICE_AQC_MDIO_BUS_ADDR_S)) { + dev_dbg(ice_pf_to_dev(pf), "ERROR: mdio_bus=%d max=%d\n", + rw->bus, + (ICE_AQC_MDIO_BUS_ADDR_M >> ICE_AQC_MDIO_BUS_ADDR_S)); + return IEPS_PEER_INVALID_ARG; + } + + if (rw->dev_type & ~(ICE_AQC_MDIO_DEV_M >> ICE_AQC_MDIO_DEV_S)) { + dev_dbg(ice_pf_to_dev(pf), "ERROR: mdiodev=%d max=%d\n", + rw->dev_type, + (ICE_AQC_MDIO_DEV_M >> ICE_AQC_MDIO_DEV_S)); + return IEPS_PEER_INVALID_ARG; + } + + mdio_cmd->mdio_bus_address = rw->phy_addr << ICE_AQC_MDIO_BUS_ADDR_S; + switch (rw->clause) { + case IEPS_PEER_MDIO_CLAUSE_22: + mdio_cmd->mdio_device_addr = ICE_AQC_MDIO_CLAUSE_22; + break; + + case IEPS_PEER_MDIO_CLAUSE_45: + mdio_cmd->mdio_device_addr = rw->dev_type << ICE_AQC_MDIO_DEV_S; + mdio_cmd->mdio_device_addr |= ICE_AQC_MDIO_CLAUSE_45; + break; + + default: + dev_dbg(ice_pf_to_dev(pf), "ERROR: mdio_cl=%d\n", rw->clause); + return IEPS_PEER_INVALID_ARG; + } + + return IEPS_PEER_SUCCESS; +} + +/** + * ice_ieps_mdio_read - Request FW to perform MDIO read + * @pf: ptr to pf + * @rw: ptr to MDIO read/write data structure + */ +static enum ieps_peer_status +ice_ieps_mdio_read(struct ice_pf *pf, struct ieps_peer_mdio *rw) +{ + enum ieps_peer_status pstatus; + struct ice_hw *hw = &pf->hw; + struct ice_aq_desc desc; + int i; + + for (i = 0; i < rw->data_len; i++) { + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_mdio); + pstatus = ice_ieps_mdio_fill_desc(pf, rw, &desc); + if (pstatus) + return pstatus; + + desc.params.read_mdio.offset = cpu_to_le16(rw->reg_addr + i); + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (status) { + dev_dbg(ice_pf_to_dev(pf), "ERROR: mdio_rd status=%d\n", + status); + return IEPS_PEER_FW_ERROR; + } + rw->data[i] = le16_to_cpu(desc.params.read_mdio.data); + } + + dev_dbg(ice_pf_to_dev(pf), + "mdio_rd bus=%d phy=0x%x dev=0x%x reg=0x%x len=%d data=0x%x\n", + rw->bus, rw->phy_addr, rw->dev_type, rw->reg_addr, rw->data_len, + rw->data[0]); + + return IEPS_PEER_SUCCESS; +} + +/** + * ice_ieps_mdio_write - Request FW to perform MDIO write + * @pf: ptr to pf + * @rw: ptr to MDIO read/write 
data structure + */ +static enum ieps_peer_status +ice_ieps_mdio_write(struct ice_pf *pf, struct ieps_peer_mdio *rw) +{ + int i; + + dev_dbg(ice_pf_to_dev(pf), + "mdio_rd bus=%d phy=0x%x dev=0x%x reg=0x%x len=%d data=0x%x\n", + rw->bus, rw->phy_addr, rw->dev_type, rw->reg_addr, rw->data_len, + rw->data[0]); + + for (i = 0; i < rw->data_len; i++) { + enum ieps_peer_status pstatus; + struct ice_hw *hw = &pf->hw; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_mdio); + pstatus = ice_ieps_mdio_fill_desc(pf, rw, &desc); + if (pstatus) + return pstatus; + + desc.params.read_mdio.offset = cpu_to_le16(rw->reg_addr + i); + desc.params.read_mdio.data = cpu_to_le16(rw->data[i]); + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (status) { + dev_err(ice_pf_to_dev(pf), "ERROR: mdio_wr status=%d\n", + status); + return IEPS_PEER_FW_ERROR; + } + } + + return IEPS_PEER_SUCCESS; +} + +#define ICE_IEPS_ETH_GPIO_MAX_PIN_COUNT 20 + +/** + * ice_ieps_gpio_fill_desc - Fill GPIO set_get AQ command descriptor + * @pf: ptr to pf + * @io: ptr to gpio set/get data structure + * @desc: ptr to AQ descriptor to fill + */ +static enum ieps_peer_status +ice_ieps_gpio_fill_desc(struct ice_pf *pf, struct ieps_peer_gpio *io, + struct ice_aq_desc *desc) +{ + struct ice_aqc_sw_gpio *sw_gpio_cmd = &desc->params.sw_read_write_gpio; + + if (io->pin_num >= ICE_IEPS_ETH_GPIO_MAX_PIN_COUNT) { + dev_dbg(ice_pf_to_dev(pf), "ERROR: pin=%d max=%d\n", + io->pin_num, ICE_IEPS_ETH_GPIO_MAX_PIN_COUNT); + return IEPS_PEER_INVALID_ARG; + } + + sw_gpio_cmd->gpio_num = io->pin_num; + if (io->pin_val) + sw_gpio_cmd->gpio_params |= ICE_AQC_SW_GPIO_PARAMS_VALUE; + + if (io->is_input) + sw_gpio_cmd->gpio_params |= ICE_AQC_SW_GPIO_PARAMS_DIRECTION; + + sw_gpio_cmd->gpio_ctrl_handle = 0; /* SOC/on-chip GPIO */ + + return IEPS_PEER_SUCCESS; +} + +/** + * ice_ieps_sw_gpio_set - Request FW to perform GPIO set operations + * @pf: ptr to pf + * @io: ptr to gpio set/get data structure + */ +static enum ieps_peer_status +ice_ieps_sw_gpio_set(struct ice_pf *pf, struct ieps_peer_gpio *io) +{ + enum ieps_peer_status pstatus; + struct ice_hw *hw = &pf->hw; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sw_set_gpio); + pstatus = ice_ieps_gpio_fill_desc(pf, io, &desc); + if (pstatus) + return pstatus; + + dev_dbg(ice_pf_to_dev(pf), "gpio_set pin=%d dir=%s val=%d\n", + io->pin_num, io->is_input ? "IN" : "OUT", io->pin_val ? 
1 : 0);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "ERROR: sw_gpio_set status=%d\n",
+ status);
+ return IEPS_PEER_FW_ERROR;
+ }
+
+ return IEPS_PEER_SUCCESS;
+}
+
+/**
+ * ice_ieps_sw_gpio_get - Request FW to perform GPIO get operations
+ * @pf: ptr to pf
+ * @io: ptr to gpio set/get data structure
+ */
+static enum ieps_peer_status
+ice_ieps_sw_gpio_get(struct ice_pf *pf, struct ieps_peer_gpio *io)
+{
+ enum ieps_peer_status pstatus;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sw_get_gpio);
+ pstatus = ice_ieps_gpio_fill_desc(pf, io, &desc);
+ if (pstatus)
+ return pstatus;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "ERROR: sw_gpio_get status=%d\n",
+ status);
+ return IEPS_PEER_FW_ERROR;
+ }
+
+ io->is_input = desc.params.sw_read_write_gpio.gpio_params &
+ ICE_AQC_SW_GPIO_PARAMS_DIRECTION;
+ io->pin_val = desc.params.sw_read_write_gpio.gpio_params &
+ ICE_AQC_SW_GPIO_PARAMS_VALUE;
+
+ dev_dbg(ice_pf_to_dev(pf), "gpio_get pin=%d dir=%s val=%d\n",
+ io->pin_num, io->is_input ? "IN" : "OUT", io->pin_val ? 1 : 0);
+
+ return IEPS_PEER_SUCCESS;
+}
+
+/**
+ * ice_ieps_ver_check - Perform IEPS version check
+ * @pf: ptr to pf
+ * @lib_ver: ptr to IEPS api lib version to check against
+ */
+static enum ieps_peer_status
+ice_ieps_ver_check(struct ice_pf *pf, struct ieps_peer_api_version *lib_ver)
+{
+ struct ieps_peer_api_version *my_ver = &ice_ieps_version;
+
+ if (lib_ver->major != my_ver->major || lib_ver->minor < my_ver->minor) {
+ dev_err(ice_pf_to_dev(pf), "ERROR: version check\t"
+ "exp: mj=%d, mn=%d. given: mj=%d, mn=%d\n",
+ my_ver->major, my_ver->minor,
+ lib_ver->major, lib_ver->minor);
+
+ return IEPS_PEER_VER_INCOMPATIBLE;
+ }
+
+ return IEPS_PEER_SUCCESS;
+}
+
+#define ICE_MASK_PHY_AN_CAPS 0xF
+
+/**
+ * ice_ieps_get_phy_caps - Request FW for PHY capabilities
+ * @pf: ptr to pf
+ * @report_mode: capability set to retrieve (e.g. NVM vs ACTIVE)
+ * @pcaps: ptr to phy caps structure to store result
+ */
+static enum ieps_peer_status
+ice_ieps_get_phy_caps(struct ice_pf *pf, u8 report_mode,
+ struct ieps_peer_phy_caps *pcaps)
+{
+ enum ieps_peer_status pstatus = IEPS_PEER_SUCCESS;
+ struct ice_aqc_get_phy_caps_data *aq_pcaps;
+ struct ice_hw *hw = &pf->hw;
+ int status;
+
+ aq_pcaps = kzalloc(sizeof(*aq_pcaps), GFP_KERNEL);
+ if (!aq_pcaps)
+ return IEPS_PEER_NO_MEMORY;
+
+ dev_dbg(ice_pf_to_dev(pf), "get phy_caps lport=%d\n",
+ hw->port_info->lport);
+ status = ice_aq_get_phy_caps(hw->port_info, false, report_mode,
+ aq_pcaps, NULL);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "ERROR: get_phy_caps ret=%d\n",
+ status);
+ pstatus = IEPS_PEER_FW_ERROR;
+ goto err_exit;
+ }
+
+ pcaps->phy_type_low = le64_to_cpu(aq_pcaps->phy_type_low);
+ pcaps->phy_type_high = le64_to_cpu(aq_pcaps->phy_type_high);
+
+ pcaps->en_tx_pause = !!(aq_pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE);
+ pcaps->en_rx_pause = !!(aq_pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+ pcaps->low_power_mode = !!(aq_pcaps->caps & ICE_AQC_PHY_LOW_POWER_MODE);
+ pcaps->en_link = !!(aq_pcaps->caps & ICE_AQC_PHY_EN_LINK);
+ pcaps->an_mode = !!(aq_pcaps->caps & ICE_AQC_PHY_AN_MODE);
+ pcaps->en_lesm = !!(aq_pcaps->caps & ICE_AQC_PHY_EN_LESM);
+ pcaps->en_auto_fec = !!(aq_pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
+
+ pcaps->an_options_bm = aq_pcaps->low_power_ctrl_an &
+ ICE_MASK_PHY_AN_CAPS;
+ pcaps->fec_options_bm = aq_pcaps->link_fec_options &
+ ICE_AQC_PHY_FEC_MASK;
+
+ if (report_mode == ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA)
+ memcpy(pcaps->phy_fw_ver, aq_pcaps->phy_fw_ver,
+ sizeof(pcaps->phy_fw_ver));
+
+err_exit:
+ kfree(aq_pcaps);
+
+ return pstatus;
+}
+
+/**
+ * ice_ieps_get_phy_status - Request FW to retrieve link status
+ * @pf: ptr to pf
+ * @st: ptr to link status data structure to store the results
+ */
+static enum ieps_peer_status
+ice_ieps_get_phy_status(struct ice_pf *pf, struct ieps_peer_phy_link_status *st)
+{
+ enum ieps_peer_status pstatus = IEPS_PEER_SUCCESS;
+ struct ice_link_status *link;
+ struct ice_hw *hw = &pf->hw;
+ int status;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return IEPS_PEER_NO_MEMORY;
+
+ status = ice_aq_get_link_info(hw->port_info, true, link, NULL);
+ if (status) {
+ pstatus = IEPS_PEER_FW_ERROR;
+ goto err_exit;
+ }
+
+ st->link_up = !!(link->link_info & ICE_AQ_LINK_UP);
+ st->link_fault_rx = !!(link->link_info & ICE_AQ_LINK_FAULT_RX);
+ st->link_fault_tx = !!(link->link_info & ICE_AQ_LINK_FAULT_TX);
+ st->link_fault_remote = !!(link->link_info & ICE_AQ_LINK_FAULT_REMOTE);
+ st->link_up_ext_port = !!(link->link_info & ICE_AQ_LINK_UP_PORT);
+ st->media_available = !!(link->link_info & ICE_AQ_MEDIA_AVAILABLE);
+ st->an_complete = !!(link->an_info & ICE_AQ_AN_COMPLETED);
+ st->an_capable = !!(link->an_info & ICE_AQ_LP_AN_ABILITY);
+ st->los = !(link->link_info & ICE_AQ_SIGNAL_DETECT);
+
+ st->phy_type_low = cpu_to_le64(link->phy_type_low);
+ st->phy_type_high = cpu_to_le64(link->phy_type_high);
+ st->lse_on = link->lse_ena;
+
+err_exit:
+ kfree(link);
+
+ return pstatus;
+}
+
+/**
+ * ice_ieps_set_mode - Request FW to set port mode
+ * @pf: ptr to pf
+ * @mode: ptr to enumerated port mode
+ */
+static enum ieps_peer_status
+ice_ieps_set_mode(struct ice_pf *pf, enum ieps_peer_port_mode *mode)
+{
+ struct ice_hw *hw = &pf->hw;
+ bool ena_link = false;
+ int status;
+
+ if (*mode >= NUM_IEPS_PEER_PORT_MODE)
+ return IEPS_PEER_INVALID_PORT_MODE;
+
+ if (*mode == IEPS_PEER_PORT_MODE_UP)
+ ena_link = true;
+
+ status = ice_aq_set_link_restart_an(hw->port_info, ena_link, NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "ERROR: set_mode status=%d\n",
+ status);
+ return IEPS_PEER_FW_ERROR;
+ }
+
+ return IEPS_PEER_SUCCESS;
+}
+
+/**
+ * ice_ieps_get_mode - Request FW to return port mode
+ * @pf: ptr to pf
+ * @mode: ptr to store retrieved port mode
+ */
+static enum ieps_peer_status
+ice_ieps_get_mode(struct ice_pf *pf, enum ieps_peer_port_mode *mode)
+{
+ enum ieps_peer_status pstatus = IEPS_PEER_SUCCESS;
+ struct ieps_peer_phy_caps *pcaps;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return IEPS_PEER_NO_MEMORY;
+
+ pstatus = ice_ieps_get_phy_caps(pf, ICE_AQC_REPORT_ACTIVE_CFG, pcaps);
+ if (pstatus)
+ goto err_exit;
+
+ if (pcaps->en_link)
+ *mode = IEPS_PEER_PORT_MODE_UP;
+ else
+ *mode = IEPS_PEER_PORT_MODE_DOWN;
+
+err_exit:
+ kfree(pcaps);
+
+ return pstatus;
+}
+
+/**
+ * ice_ieps_phy_type_decode - Decode phy_type bitmap to PHY_TYPE enum
+ * @pf: ptr to pf
+ * @phy_cfg: ptr to aqc_set_phy_cfg structure
+ * @phy_type: ptr to enumerated phy_type
+ *
+ * With IEPS, there is always a single PHY_TYPE active in the PHY.
+ * IEPS driver explicitly initializes PHY_TYPE to single value to + * override possible multi phy-type in default NVM PHY_TYPE config + */ +static enum ieps_peer_status +ice_ieps_phy_type_decode(struct ice_pf *pf, + struct ice_aqc_set_phy_cfg_data *phy_cfg, + enum ieps_peer_phy_type *phy_type) +{ + bool phy_type_found = false, phy_type_multi = false; + int i; + + if (phy_cfg->phy_type_low) { + for (i = 0; i <= ICE_PHY_TYPE_LOW_MAX_INDEX; i++) { + u64 type_low = le64_to_cpu(phy_cfg->phy_type_low); + + if (type_low & BIT_ULL(i)) { + *phy_type = i; + phy_type_found = true; + + if (type_low >> (i + 1)) + phy_type_multi = true; + + break; + } + } + } + + if (phy_type_found && phy_cfg->phy_type_high) + phy_type_multi = true; + + if (!phy_type_multi && phy_cfg->phy_type_high) { + for (i = 0; i <= ICE_PHY_TYPE_HIGH_MAX_INDEX; i++) { + u64 type_high = le64_to_cpu(phy_cfg->phy_type_high); + + if (type_high & BIT_ULL(i)) { + *phy_type = ICE_PHY_TYPE_LOW_MAX_INDEX + 1 + i; + phy_type_found = true; + + if (type_high >> (i + 1)) + phy_type_multi = true; + + break; + } + } + } + + if (!phy_type_found || phy_type_multi) { + dev_err(ice_pf_to_dev(pf), + "ERROR: MULTIPLE_PHY_TYPE l=0x%llx h=0x%llx\n", + phy_cfg->phy_type_low, phy_cfg->phy_type_high); + return IEPS_PEER_MULTIPLE_PHY_TYPE; + } + + return IEPS_PEER_SUCCESS; +} + +/** + * ice_ieps_phy_type_setget - Helper function to set/get phy-type + * @pf: ptr to pf + * @op_set: true for set operation else, get operation + * @attr_data: ptr to ieps_port_attr obj containing attr and config value + * @phy_cfg: ptr to aqc_set_phy_cfg structure + */ +static enum ieps_peer_status +ice_ieps_phy_type_setget(struct ice_pf *pf, bool op_set, + struct ieps_peer_port_attr_data *attr_data, + struct ice_aqc_set_phy_cfg_data *phy_cfg) +{ + struct ieps_peer_phy_caps *pcaps; + enum ieps_peer_status pstatus; + + if (!op_set) + return ice_ieps_phy_type_decode(pf, phy_cfg, + &attr_data->cfg.phy_type); + + if (attr_data->cfg.phy_type >= NUM_IEPS_PEER_PHY_TYPE || + attr_data->cfg.phy_type < 0) + return IEPS_PEER_PORT_INV_PHY_TYPE; + + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return IEPS_PEER_NO_MEMORY; + + pstatus = ice_ieps_get_phy_caps(pf, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, + pcaps); + if (pstatus) + goto rel_mem_exit; + + if (attr_data->cfg.phy_type >= 64) { + u64 type_high = 1ULL << (attr_data->cfg.phy_type - 64); + + if (!(type_high & pcaps->phy_type_high)) + pstatus = IEPS_PEER_PHY_TYPE_NOTSUP; + + phy_cfg->phy_type_high = cpu_to_le64(type_high); + } else { + u64 type_low = 1ULL << attr_data->cfg.phy_type; + + if (!(type_low & pcaps->phy_type_low)) + pstatus = IEPS_PEER_PHY_TYPE_NOTSUP; + + phy_cfg->phy_type_low = cpu_to_le64(type_low); + } + +rel_mem_exit: + kfree(pcaps); + return pstatus; +} + +/** + * ice_ieps_an_setget - Helper function to set/get Autonegotiation options + * @pf: ptr to pf + * @op_set: true for set operation else, get operation + * @attr_data: ptr to ieps_port_attr obj containing attr and config value + * @phy_cfg: ptr to aqc_set_phy_cfg structure + */ +static enum ieps_peer_status +ice_ieps_an_setget(struct ice_pf *pf, bool op_set, + struct ieps_peer_port_attr_data *attr_data, + struct ice_aqc_set_phy_cfg_data *phy_cfg) +{ + if (op_set) { + if (attr_data->cfg.an_cl37_enable) + phy_cfg->low_power_ctrl_an = ICE_AQC_PHY_AN_EN_CLAUSE37; + else + phy_cfg->low_power_ctrl_an = 0; + } else { + attr_data->cfg.an_cl37_enable = !!(phy_cfg->low_power_ctrl_an & + ICE_AQC_PHY_AN_EN_CLAUSE37); + } + + return IEPS_PEER_SUCCESS; +} + +/** + * 
ice_ieps_fec_type_setget - Helper function to set/get FEC options + * @pf: ptr to pf + * @op_set: true for set operation else, get operation + * @attr_data: ptr to ieps_port_attr obj containing attr and config value + * @phy_cfg: ptr to aqc_set_phy_cfg structure + */ +static enum ieps_peer_status +ice_ieps_fec_type_setget(struct ice_pf *pf, bool op_set, + struct ieps_peer_port_attr_data *attr_data, + struct ice_aqc_set_phy_cfg_data *phy_cfg) +{ + struct ieps_peer_phy_caps *pcaps; + enum ieps_peer_status pstatus; + + if (!op_set) { + attr_data->cfg.fec_options_bm = phy_cfg->link_fec_opt & + ICE_AQC_PHY_FEC_MASK; + return IEPS_PEER_SUCCESS; + } + + if (attr_data->cfg.fec_options_bm & ~ICE_AQC_PHY_FEC_MASK) + return IEPS_PEER_INVALID_FEC_OPT; + + phy_cfg->link_fec_opt = attr_data->cfg.fec_options_bm & + ICE_AQC_PHY_FEC_MASK; + + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return IEPS_PEER_NO_MEMORY; + + pstatus = ice_ieps_get_phy_caps(pf, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, + pcaps); + if (pstatus) + goto rel_mem_exit; + + if ((phy_cfg->link_fec_opt & pcaps->fec_options_bm) != + phy_cfg->link_fec_opt) + pstatus = IEPS_PEER_FEC_OPT_NOTSUP; + +rel_mem_exit: + kfree(pcaps); + return pstatus; +} + +/** + * ice_ieps_set_get_attr_copy_data - Map data between ieps & ice structures + * @pf: ptr to pf + * @op_set: true for set operation else, get operation + * @attr_data: ptr to ieps_port_attr obj containing attr and config value + * @phy_cfg: ptr to aqc_set_phy_cfg structure + */ +static enum ieps_peer_status +ice_ieps_set_get_attr_copy_data(struct ice_pf *pf, bool op_set, + struct ieps_peer_port_attr_data *attr_data, + struct ice_aqc_set_phy_cfg_data *phy_cfg) +{ + switch (attr_data->attr) { + case IEPS_PEER_PA_PHY_TYPE: + return ice_ieps_phy_type_setget(pf, op_set, attr_data, phy_cfg); + + case IEPS_PEER_PA_PHY_AN: + return ice_ieps_an_setget(pf, op_set, attr_data, phy_cfg); + + case IEPS_PEER_PA_PHY_FEC: + return ice_ieps_fec_type_setget(pf, op_set, attr_data, phy_cfg); + + default: + return IEPS_PEER_INVALID_PORT_ATTR; + } +} + +/** + * ice_ieps_set_get_attr - Generic function to request FW to set/get phy config + * @pf: ptr to pf + * @op_set: true for set operation else, get operation + * @attr_data: ptr to ieps_port_attr obj containing attr and config value + */ +static enum ieps_peer_status +ice_ieps_set_get_attr(struct ice_pf *pf, bool op_set, + struct ieps_peer_port_attr_data *attr_data) +{ + struct ice_aqc_set_phy_cfg_data *phy_cfg; + struct ice_aqc_get_phy_caps_data *pcaps; + enum ieps_peer_status pstatus; + struct ice_hw *hw = &pf->hw; + int status; + + phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL); + if (!phy_cfg) + return IEPS_PEER_NO_MEMORY; + + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) { + kfree(phy_cfg); + return IEPS_PEER_NO_MEMORY; + } + + if (op_set) + dev_dbg(ice_pf_to_dev(pf), "set_attr = %d\n", attr_data->attr); + else + dev_dbg(ice_pf_to_dev(pf), "get_attr = %d\n", attr_data->attr); + + status = ice_aq_get_phy_caps(hw->port_info, false, + ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); + if (status) { + dev_dbg(ice_pf_to_dev(pf), "ERROR: set_attr get_phy_caps status=%d\n", + status); + pstatus = IEPS_PEER_FW_ERROR; + goto release_exit; + } + + phy_cfg->phy_type_low = pcaps->phy_type_low; + phy_cfg->phy_type_high = pcaps->phy_type_high; + phy_cfg->caps = pcaps->caps; + phy_cfg->low_power_ctrl_an = pcaps->low_power_ctrl_an; + phy_cfg->link_fec_opt = pcaps->link_fec_options; + phy_cfg->eee_cap = pcaps->eee_cap; + + pstatus = 
ice_ieps_set_get_attr_copy_data(pf, op_set, attr_data,
+ phy_cfg);
+ if (pstatus)
+ goto release_exit;
+
+ if (!op_set) {
+ pstatus = IEPS_PEER_SUCCESS;
+ goto release_exit;
+ }
+
+ status = ice_aq_set_phy_cfg(hw, hw->port_info, phy_cfg, NULL);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "ERROR: set_phy_caps status=%d\n",
+ status);
+ pstatus = IEPS_PEER_FW_ERROR;
+ }
+
+release_exit:
+ kfree(pcaps);
+ kfree(phy_cfg);
+
+ return pstatus;
+}
+
+/**
+ * ice_ieps_phy_reg_rw - Perform RMN0 reg rd/wr over SBQ
+ * @pf: ptr to pf
+ * @rw: ptr to PHY reg read/write data structure
+ */
+static enum ieps_peer_status
+ice_ieps_phy_reg_rw(struct ice_pf *pf, struct ieps_peer_intphy_reg_rw *rw)
+{
+ struct ice_sbq_msg_input sbq_msg = {0};
+ struct ice_hw *hw = &pf->hw;
+ int status;
+
+#define ICE_IEPS_SBQ_ADDR_HIGH_S 16
+#define ICE_IEPS_SBQ_ADDR_HIGH_M 0xFFFFFFFF
+#define ICE_IEPS_SBQ_ADDR_LOW_M 0xFFFF
+
+ sbq_msg.dest_dev = rmn_0;
+ sbq_msg.msg_addr_low = rw->reg & ICE_IEPS_SBQ_ADDR_LOW_M;
+ sbq_msg.msg_addr_high = (rw->reg >> ICE_IEPS_SBQ_ADDR_HIGH_S) &
+ ICE_IEPS_SBQ_ADDR_HIGH_M;
+ if (rw->is_write) {
+ sbq_msg.opcode = ice_sbq_msg_wr;
+ sbq_msg.data = rw->data;
+ } else {
+ sbq_msg.opcode = ice_sbq_msg_rd;
+ }
+
+ status = ice_sbq_rw_reg(hw, &sbq_msg);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "ERROR: sbq_rw_reg status=%d\n",
+ status);
+ return IEPS_PEER_FW_ERROR;
+ }
+
+ if (!rw->is_write)
+ rw->data = sbq_msg.data;
+
+ return IEPS_PEER_SUCCESS;
+}
+
+/**
+ * ice_ieps_set_lm_config - request FW to enable or disable LESM
+ * @pf: ptr to pf
+ * @en_lesm: set true to enable LESM, else disable LESM
+ */
+static enum ieps_peer_status
+ice_ieps_set_lm_config(struct ice_pf *pf, bool en_lesm)
+{
+ enum ieps_peer_status pstatus = IEPS_PEER_SUCCESS;
+ struct ice_aqc_set_phy_cfg_data *phy_cfg;
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_hw *hw = &pf->hw;
+ int status;
+
+ phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL);
+ if (!phy_cfg)
+ return IEPS_PEER_NO_MEMORY;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps) {
+ kfree(phy_cfg);
+ return IEPS_PEER_NO_MEMORY;
+ }
+
+ status = ice_aq_get_phy_caps(hw->port_info, false,
+ ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "ERROR:get_phy_caps status=%d\n",
+ status);
+ pstatus = IEPS_PEER_FW_ERROR;
+ goto release_exit;
+ }
+
+ phy_cfg->phy_type_low = pcaps->phy_type_low;
+ phy_cfg->phy_type_high = pcaps->phy_type_high;
+ phy_cfg->low_power_ctrl_an = pcaps->low_power_ctrl_an;
+ phy_cfg->link_fec_opt = pcaps->link_fec_options;
+ phy_cfg->eee_cap = pcaps->eee_cap;
+ phy_cfg->caps = pcaps->caps;
+
+ if (en_lesm)
+ phy_cfg->caps |= ICE_AQ_PHY_ENA_LESM;
+ else
+ phy_cfg->caps &= ~ICE_AQ_PHY_ENA_LESM;
+
+ status = ice_aq_set_phy_cfg(hw, hw->port_info, phy_cfg, NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "ERROR: lm port config status=%d\n",
+ status);
+ pstatus = IEPS_PEER_FW_ERROR;
+ }
+
+release_exit:
+ kfree(pcaps);
+ kfree(phy_cfg);
+
+ return pstatus;
+}
+
+/**
+ * ice_ieps_entry - Dispatch an IEPS peer command to the matching handler
+ * @obj: ptr to IDC peer device data object
+ * @vptr_arg: ptr to peer arg structure containing cmd and cmd specific data
+ */
+int ice_ieps_entry(struct iidc_core_dev_info *obj, void *vptr_arg)
+{
+ struct ieps_peer_arg *arg = (struct ieps_peer_arg *)vptr_arg;
+ struct ice_pf *pf;
+ void *vptr;
+
+ if (!obj || !obj->pdev)
+ return IEPS_PEER_INVALID_PEER_DEV;
+
+ pf = pci_get_drvdata(obj->pdev);
+ if (!pf)
+ return IEPS_PEER_INVALID_PEER_DEV;
+
+ if (!arg || !arg->data)
+ return
IEPS_PEER_INVALID_ARG; + + if (arg->cmd >= NUM_IEPS_PEER_CMD) + return IEPS_PEER_INVALID_CMD; + + vptr = arg->data; + switch (arg->cmd) { + case IEPS_PEER_CMD_VERSION_CHECK: + return ice_ieps_ver_check(pf, + (struct ieps_peer_api_version *)vptr); + + case IEPS_PEER_CMD_I2C_READ: + return ice_ieps_i2c_read(pf, (struct ieps_peer_i2c *)vptr); + + case IEPS_PEER_CMD_I2C_WRITE: + return ice_ieps_i2c_write(pf, (struct ieps_peer_i2c *)vptr); + + case IEPS_PEER_CMD_MDIO_READ: + return ice_ieps_mdio_read(pf, (struct ieps_peer_mdio *)vptr); + + case IEPS_PEER_CMD_MDIO_WRITE: + return ice_ieps_mdio_write(pf, (struct ieps_peer_mdio *)vptr); + + case IEPS_PEER_CMD_GPIO_GET: + return ice_ieps_sw_gpio_get(pf, (struct ieps_peer_gpio *)vptr); + + case IEPS_PEER_CMD_GPIO_SET: + return ice_ieps_sw_gpio_set(pf, (struct ieps_peer_gpio *)vptr); + + case IEPS_PEER_CMD_GET_NVM_PHY_CAPS: + return ice_ieps_get_phy_caps(pf, + ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, + (struct ieps_peer_phy_caps *)vptr); + + case IEPS_PEER_CMD_GET_LINK_STATUS: + return ice_ieps_get_phy_status(pf, + (struct ieps_peer_phy_link_status *)vptr); + + case IEPS_PEER_CMD_PORT_GET_MODE: + return ice_ieps_get_mode(pf, (enum ieps_peer_port_mode *)vptr); + + case IEPS_PEER_CMD_PORT_SET_MODE: + return ice_ieps_set_mode(pf, (enum ieps_peer_port_mode *)vptr); + + case IEPS_PEER_CMD_PORT_SET_ATTR: + return ice_ieps_set_get_attr(pf, true, + (struct ieps_peer_port_attr_data *)vptr); + + case IEPS_PEER_CMD_PORT_GET_ATTR: + return ice_ieps_set_get_attr(pf, false, + (struct ieps_peer_port_attr_data *)vptr); + + case IEPS_PEER_CMD_INTPHY_REG_RW: + return ice_ieps_phy_reg_rw(pf, + (struct ieps_peer_intphy_reg_rw *)vptr); + + case IEPS_PEER_CMD_SET_LM_CONFIG: + return ice_ieps_set_lm_config(pf, *(bool *)vptr); + + default: + return IEPS_PEER_INVALID_CMD; + } + + return IEPS_PEER_SUCCESS; +} diff --git a/drivers/thirdparty/ice/ice_ieps.h b/drivers/thirdparty/ice/ice_ieps.h new file mode 100644 index 000000000000..967e70bcfd7a --- /dev/null +++ b/drivers/thirdparty/ice/ice_ieps.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +/* Intel(R) Ethernet Connection E800 Series Linux Driver IEPS extensions */ + +#ifndef _ICE_IEPS_H_ +#define _ICE_IEPS_H_ + +#include "ieps_peer.h" +#include "iidc.h" + +int ice_ieps_entry(struct iidc_core_dev_info *obj, void *arg); + +#endif /* _ICE_IEPS_H_ */ diff --git a/drivers/thirdparty/ice/ice_imem.c b/drivers/thirdparty/ice/ice_imem.c new file mode 100644 index 000000000000..880ffa181d4f --- /dev/null +++ b/drivers/thirdparty/ice/ice_imem.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#include "ice_common.h" +#include "ice_parser_util.h" + +#define ICE_IMEM_TABLE_SIZE 192 + +static void _imem_bst_bm_dump(struct ice_hw *hw, struct ice_bst_main *bm) +{ + dev_info(ice_hw_to_dev(hw), "boost main:\n"); + dev_info(ice_hw_to_dev(hw), "\tal0 = %d\n", bm->al0); + dev_info(ice_hw_to_dev(hw), "\tal1 = %d\n", bm->al1); + dev_info(ice_hw_to_dev(hw), "\tal2 = %d\n", bm->al2); + dev_info(ice_hw_to_dev(hw), "\tpg = %d\n", bm->pg); +} + +static void _imem_bst_kb_dump(struct ice_hw *hw, struct ice_bst_keybuilder *kb) +{ + dev_info(ice_hw_to_dev(hw), "boost key builder:\n"); + dev_info(ice_hw_to_dev(hw), "\tpriority = %d\n", kb->priority); + dev_info(ice_hw_to_dev(hw), "\ttsr_ctrl = %d\n", kb->tsr_ctrl); +} + +static void _imem_np_kb_dump(struct ice_hw *hw, struct ice_np_keybuilder *kb) +{ + dev_info(ice_hw_to_dev(hw), "next proto key builder:\n"); + dev_info(ice_hw_to_dev(hw), "\tops = %d\n", kb->ops); + dev_info(ice_hw_to_dev(hw), "\tstart_or_reg0 = %d\n", + kb->start_or_reg0); + dev_info(ice_hw_to_dev(hw), "\tlen_or_reg1 = %d\n", kb->len_or_reg1); +} + +static void _imem_pg_kb_dump(struct ice_hw *hw, struct ice_pg_keybuilder *kb) +{ + dev_info(ice_hw_to_dev(hw), "parse graph key builder:\n"); + dev_info(ice_hw_to_dev(hw), "\tflag0_ena = %d\n", kb->flag0_ena); + dev_info(ice_hw_to_dev(hw), "\tflag1_ena = %d\n", kb->flag1_ena); + dev_info(ice_hw_to_dev(hw), "\tflag2_ena = %d\n", kb->flag2_ena); + dev_info(ice_hw_to_dev(hw), "\tflag3_ena = %d\n", kb->flag3_ena); + dev_info(ice_hw_to_dev(hw), "\tflag0_idx = %d\n", kb->flag0_idx); + dev_info(ice_hw_to_dev(hw), "\tflag1_idx = %d\n", kb->flag1_idx); + dev_info(ice_hw_to_dev(hw), "\tflag2_idx = %d\n", kb->flag2_idx); + dev_info(ice_hw_to_dev(hw), "\tflag3_idx = %d\n", kb->flag3_idx); + dev_info(ice_hw_to_dev(hw), "\talu_reg_idx = %d\n", kb->alu_reg_idx); +} + +static void _imem_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int index) +{ + dev_info(ice_hw_to_dev(hw), "alu%d:\n", index); + dev_info(ice_hw_to_dev(hw), "\topc = %d\n", alu->opc); + dev_info(ice_hw_to_dev(hw), "\tsrc_start = %d\n", alu->src_start); + dev_info(ice_hw_to_dev(hw), "\tsrc_len = %d\n", alu->src_len); + dev_info(ice_hw_to_dev(hw), "\tshift_xlate_select = %d\n", + alu->shift_xlate_select); + dev_info(ice_hw_to_dev(hw), "\tshift_xlate_key = %d\n", + alu->shift_xlate_key); + dev_info(ice_hw_to_dev(hw), "\tsrc_reg_id = %d\n", alu->src_reg_id); + dev_info(ice_hw_to_dev(hw), "\tdst_reg_id = %d\n", alu->dst_reg_id); + dev_info(ice_hw_to_dev(hw), "\tinc0 = %d\n", alu->inc0); + dev_info(ice_hw_to_dev(hw), "\tinc1 = %d\n", alu->inc1); + dev_info(ice_hw_to_dev(hw), "\tproto_offset_opc = %d\n", + alu->proto_offset_opc); + dev_info(ice_hw_to_dev(hw), "\tproto_offset = %d\n", + alu->proto_offset); + dev_info(ice_hw_to_dev(hw), "\tbranch_addr = %d\n", alu->branch_addr); + dev_info(ice_hw_to_dev(hw), "\timm = %d\n", alu->imm); + dev_info(ice_hw_to_dev(hw), "\tdst_start = %d\n", alu->dst_start); + dev_info(ice_hw_to_dev(hw), "\tdst_len = %d\n", alu->dst_len); + dev_info(ice_hw_to_dev(hw), "\tflags_extr_imm = %d\n", + alu->flags_extr_imm); + dev_info(ice_hw_to_dev(hw), "\tflags_start_imm= %d\n", + alu->flags_start_imm); +} + +/** + * ice_imem_dump - dump an imem item info + * @hw: pointer to the hardware structure + * @item: imem item to dump + */ +void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item) +{ + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); + _imem_bst_bm_dump(hw, &item->b_m); + _imem_bst_kb_dump(hw, &item->b_kb); + dev_info(ice_hw_to_dev(hw), "pg 
priority = %d\n", item->pg);
+ _imem_np_kb_dump(hw, &item->np_kb);
+ _imem_pg_kb_dump(hw, &item->pg_kb);
+ _imem_alu_dump(hw, &item->alu0, 0);
+ _imem_alu_dump(hw, &item->alu1, 1);
+ _imem_alu_dump(hw, &item->alu2, 2);
+}
+
+/** The function parses a 4-bit Boost Main with the below format:
+ * BIT 0: ALU 0 (bm->alu0)
+ * BIT 1: ALU 1 (bm->alu1)
+ * BIT 2: ALU 2 (bm->alu2)
+ * BIT 3: Parse Graph (bm->pg)
+ */
+static void _imem_bm_init(struct ice_bst_main *bm, u8 data)
+{
+ bm->al0 = (data & 0x1) != 0;
+ bm->al1 = (data & 0x2) != 0;
+ bm->al2 = (data & 0x4) != 0;
+ bm->pg = (data & 0x8) != 0;
+}
+
+/** The function parses a 10-bit Boost Key Build with the below format:
+ * BIT 0-7: Priority (bkb->priority)
+ * BIT 8: TSR Control (bkb->tsr_ctrl)
+ * BIT 9: Reserved
+ */
+static void _imem_bkb_init(struct ice_bst_keybuilder *bkb, u16 data)
+{
+ bkb->priority = (u8)(data & 0xff);
+ bkb->tsr_ctrl = (data & 0x100) != 0;
+}
+
+/** The function parses an 18-bit Next Protocol Key Build with the below format:
+ * BIT 0-1: Opcode (kb->ops)
+ * BIT 2-9: Start / Reg 0 (kb->start_or_reg0)
+ * BIT 10-17: Length / Reg 1 (kb->len_or_reg1)
+ */
+static void _imem_npkb_init(struct ice_np_keybuilder *kb, u32 data)
+{
+ kb->ops = (u8)(data & 0x3);
+ kb->start_or_reg0 = (u8)((data >> 2) & 0xff);
+ kb->len_or_reg1 = (u8)((data >> 10) & 0xff);
+}
+
+/** The function parses a 35-bit Parse Graph Key Build with the below format:
+ * BIT 0: Flag 0 Enable (kb->flag0_ena)
+ * BIT 1-6: Flag 0 Index (kb->flag0_idx)
+ * BIT 7: Flag 1 Enable (kb->flag1_ena)
+ * BIT 8-13: Flag 1 Index (kb->flag1_idx)
+ * BIT 14: Flag 2 Enable (kb->flag2_ena)
+ * BIT 15-20: Flag 2 Index (kb->flag2_idx)
+ * BIT 21: Flag 3 Enable (kb->flag3_ena)
+ * BIT 22-27: Flag 3 Index (kb->flag3_idx)
+ * BIT 28-34: ALU Register Index (kb->alu_reg_idx)
+ */
+static void _imem_pgkb_init(struct ice_pg_keybuilder *kb, u64 data)
+{
+ kb->flag0_ena = (data & 0x1) != 0;
+ kb->flag0_idx = (u8)((data >> 1) & 0x3f);
+ kb->flag1_ena = ((data >> 7) & 0x1) != 0;
+ kb->flag1_idx = (u8)((data >> 8) & 0x3f);
+ kb->flag2_ena = ((data >> 14) & 0x1) != 0;
+ kb->flag2_idx = (u8)((data >> 15) & 0x3f);
+ kb->flag3_ena = ((data >> 21) & 0x1) != 0;
+ kb->flag3_idx = (u8)((data >> 22) & 0x3f);
+ kb->alu_reg_idx = (u8)((data >> 28) & 0x7f);
+}
+
+/** The function parses a 96-bit ALU entry with the below format:
+ * BIT 0-5: Opcode (alu->opc)
+ * BIT 6-13: Source Start (alu->src_start)
+ * BIT 14-18: Source Length (alu->src_len)
+ * BIT 19: Shift/Xlate Select (alu->shift_xlate_select)
+ * BIT 20-23: Shift/Xlate Key (alu->shift_xlate_key)
+ * BIT 24-30: Source Register ID (alu->src_reg_id)
+ * BIT 31-37: Dest. Register ID (alu->dst_reg_id)
+ * BIT 38: Inc0 (alu->inc0)
+ * BIT 39: Inc1 (alu->inc1)
+ * BIT 40:41 Protocol Offset Opcode (alu->proto_offset_opc)
+ * BIT 42:49 Protocol Offset (alu->proto_offset)
+ * BIT 50:57 Branch Address (alu->branch_addr)
+ * BIT 58:73 Immediate (alu->imm)
+ * BIT 74 Dedicated Flags Enable (alu->dedicate_flags_ena)
+ * BIT 75:80 Dest. Start (alu->dst_start)
+ * BIT 81:86 Dest. Length (alu->dst_len)
+ * BIT 87 Flags Extract Imm. (alu->flags_extr_imm)
+ * BIT 88:95 Flags Start/Immediate (alu->flags_start_imm)
+ *
+ * NOTE: the first 5 bits are skipped as the start bit is not
+ * byte aligned.
+ */ +static void _imem_alu_init(struct ice_alu *alu, u8 *data) +{ + u64 d64 = *(u64 *)data >> 5; + + alu->opc = (enum ice_alu_opcode)(d64 & 0x3f); + alu->src_start = (u8)((d64 >> 6) & 0xff); + alu->src_len = (u8)((d64 >> 14) & 0x1f); + alu->shift_xlate_select = ((d64 >> 19) & 0x1) != 0; + alu->shift_xlate_key = (u8)((d64 >> 20) & 0xf); + alu->src_reg_id = (u8)((d64 >> 24) & 0x7f); + alu->dst_reg_id = (u8)((d64 >> 31) & 0x7f); + alu->inc0 = ((d64 >> 38) & 0x1) != 0; + alu->inc1 = ((d64 >> 39) & 0x1) != 0; + alu->proto_offset_opc = (u8)((d64 >> 40) & 0x3); + alu->proto_offset = (u8)((d64 >> 42) & 0xff); + alu->branch_addr = (u8)((d64 >> 50) & 0xff); + + d64 = *(u64 *)(&data[7]) >> 7; + + alu->imm = (u16)(d64 & 0xffff); + alu->dedicate_flags_ena = ((d64 >> 16) & 0x1) != 0; + alu->dst_start = (u8)((d64 >> 17) & 0x3f); + alu->dst_len = (u8)((d64 >> 23) & 0x3f); + alu->flags_extr_imm = ((d64 >> 29) & 0x1) != 0; + alu->flags_start_imm = (u8)((d64 >> 30) & 0xff); +} + +/** The function parses a 384 bits IMEM entry with below format: + * BIT 0-3: Boost Main (ii->b_m) + * BIT 4-13: Boost Key Build (ii->b_kb) + * BIT 14-15: PG Priority (ii->pg) + * BIT 16-33: Next Proto Key Build (ii->np_kb) + * BIT 34-68: PG Key Build (ii->pg_kb) + * BIT 69-164: ALU0 (ii->alu0) + * BIT 165-260:ALU1 (ii->alu1) + * BIT 261-356:ALU2 (ii->alu2) + * BIT 357-383:Reserved + */ +static void _imem_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + struct ice_imem_item *ii = (struct ice_imem_item *)item; + u8 *buf = (u8 *)data; + + ii->idx = idx; + + _imem_bm_init(&ii->b_m, buf[0]); + _imem_bkb_init(&ii->b_kb, *((u16 *)(&buf[0])) >> 4); + + ii->pg = (u8)((buf[1] & 0xc0) >> 6); + _imem_npkb_init(&ii->np_kb, *((u32 *)(&buf[2]))); + _imem_pgkb_init(&ii->pg_kb, *((u64 *)(&buf[2])) >> 18); + _imem_alu_init(&ii->alu0, &buf[8]); + _imem_alu_init(&ii->alu1, &buf[20]); + _imem_alu_init(&ii->alu2, &buf[32]); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_imem_dump(hw, ii); +} + +/** + * ice_imem_table_get - create an imem table + * @hw: pointer to the hardware structure + */ +struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw) +{ + return (struct ice_imem_item *) + ice_parser_create_table(hw, ICE_SID_RXPARSER_IMEM, + sizeof(struct ice_imem_item), + ICE_IMEM_TABLE_SIZE, + ice_parser_sect_item_get, + _imem_parse_item, false); +} diff --git a/drivers/thirdparty/ice/ice_imem.h b/drivers/thirdparty/ice/ice_imem.h new file mode 100644 index 000000000000..439b5b2655f7 --- /dev/null +++ b/drivers/thirdparty/ice/ice_imem.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _ICE_IMEM_H_ +#define _ICE_IMEM_H_ + +struct ice_bst_main { + bool al0; + bool al1; + bool al2; + bool pg; +}; + +struct ice_bst_keybuilder { + u8 priority; + bool tsr_ctrl; +}; + +struct ice_np_keybuilder { + u8 ops; + u8 start_or_reg0; + u8 len_or_reg1; +}; + +struct ice_pg_keybuilder { + bool flag0_ena; + bool flag1_ena; + bool flag2_ena; + bool flag3_ena; + u8 flag0_idx; + u8 flag1_idx; + u8 flag2_idx; + u8 flag3_idx; + u8 alu_reg_idx; +}; + +enum ice_alu_opcode { + ICE_ALU_PARK = 0, + ICE_ALU_MOV_ADD = 1, + ICE_ALU_ADD = 2, + ICE_ALU_MOV_AND = 4, + ICE_ALU_AND = 5, + ICE_ALU_AND_IMM = 6, + ICE_ALU_MOV_OR = 7, + ICE_ALU_OR = 8, + ICE_ALU_MOV_XOR = 9, + ICE_ALU_XOR = 10, + ICE_ALU_NOP = 11, + ICE_ALU_BR = 12, + ICE_ALU_BREQ = 13, + ICE_ALU_BRNEQ = 14, + ICE_ALU_BRGT = 15, + ICE_ALU_BRLT = 16, + ICE_ALU_BRGEQ = 17, + ICE_ALU_BRLEG = 18, + ICE_ALU_SETEQ = 19, + ICE_ALU_ANDEQ = 20, + ICE_ALU_OREQ = 21, + ICE_ALU_SETNEQ = 22, + ICE_ALU_ANDNEQ = 23, + ICE_ALU_ORNEQ = 24, + ICE_ALU_SETGT = 25, + ICE_ALU_ANDGT = 26, + ICE_ALU_ORGT = 27, + ICE_ALU_SETLT = 28, + ICE_ALU_ANDLT = 29, + ICE_ALU_ORLT = 30, + ICE_ALU_MOV_SUB = 31, + ICE_ALU_SUB = 32, + ICE_ALU_INVALID = 64, +}; + +struct ice_alu { + enum ice_alu_opcode opc; + u8 src_start; + u8 src_len; + bool shift_xlate_select; + u8 shift_xlate_key; + u8 src_reg_id; + u8 dst_reg_id; + bool inc0; + bool inc1; + u8 proto_offset_opc; + u8 proto_offset; + u8 branch_addr; + u16 imm; + bool dedicate_flags_ena; + u8 dst_start; + u8 dst_len; + bool flags_extr_imm; + u8 flags_start_imm; +}; + +struct ice_imem_item { + u16 idx; + struct ice_bst_main b_m; + struct ice_bst_keybuilder b_kb; + u8 pg; + struct ice_np_keybuilder np_kb; + struct ice_pg_keybuilder pg_kb; + struct ice_alu alu0; + struct ice_alu alu1; + struct ice_alu alu2; +}; + +void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item); +struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw); +#endif /* _ICE_IMEM_H_ */ diff --git a/drivers/thirdparty/ice/ice_irq.c b/drivers/thirdparty/ice/ice_irq.c new file mode 100644 index 000000000000..faae2e8b5cd3 --- /dev/null +++ b/drivers/thirdparty/ice/ice_irq.c @@ -0,0 +1,377 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#include "ice.h" +#include "ice_lib.h" +#include "ice_irq.h" + +#ifdef HAVE_PCI_ALLOC_IRQ +static int ice_alloc_and_fill_msix_entries(struct ice_pf *pf, int nvec) +{ + int i; + + pf->msix_entries = kcalloc(nvec, sizeof(*pf->msix_entries), + GFP_KERNEL); + if (!pf->msix_entries) + return -ENOMEM; + + for (i = 0; i < nvec; i++) { + pf->msix_entries[i].entry = i; + pf->msix_entries[i].vector = ice_get_irq_num(pf, i); + } + + return 0; +} +#endif /* HAVE_PCI_ALLOC_IRQ */ + +#ifndef HAVE_PCI_ALLOC_IRQ +static int ice_alloc_msix_entries(struct ice_pf *pf, u16 num_entries) +{ + u16 i; + + pf->msix_entries = devm_kcalloc(ice_pf_to_dev(pf), num_entries, + sizeof(*pf->msix_entries), GFP_KERNEL); + if (!pf->msix_entries) + return -ENOMEM; + + for (i = 0; i < num_entries; i++) + pf->msix_entries[i].entry = i; + + return 0; +} + +static void ice_free_msix_entries(struct ice_pf *pf) +{ + devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); + pf->msix_entries = NULL; +} +#endif /* HAVE_PCI_ALLOC_IRQ */ + +static void ice_dis_msix(struct ice_pf *pf) +{ +#ifdef HAVE_PCI_ALLOC_IRQ + pci_free_irq_vectors(pf->pdev); +#else + ice_free_msix_entries(pf); + pci_disable_msix(pf->pdev); +#endif /* HAVE_PCI_ALLOC_IRQ */ +} + +static int ice_ena_msix(struct ice_pf *pf, int nvec) +{ +#ifdef HAVE_PCI_ALLOC_IRQ + return pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, nvec, + PCI_IRQ_MSIX); +#else + int vectors; + int err; + + err = ice_alloc_msix_entries(pf, nvec); + if (err) + return err; + + vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, + ICE_MIN_MSIX, nvec); + if (vectors < 0) + ice_free_msix_entries(pf); + + return vectors; +#endif /* HAVE_PCI_ALLOC_IRQ */ +} + +static void ice_adj_vec_clear(int *src, int size) +{ + int i; + + for (i = 0; i < size; i++) + src[i] = 0; +} + +static void ice_adj_vec_sum(int *dst, int *src, int size) +{ + int i; + + for (i = 0; i < size; i++) + dst[i] += src[i]; +} + +/* + * Allow 256 queue pairs for ADQ only if the PF has at least + * 1024 msix vectors (1 or 2 port NIC). + */ +static int ice_adq_max_qps(struct ice_pf *pf) +{ + if (pf->hw.func_caps.common_cap.num_msix_vectors >= 1024) + return ICE_ADQ_MAX_QPS; + + return num_online_cpus(); +} + +/** + * ice_ena_msix_range - request a range of MSI-X vectors from the OS + * @pf: board private structure + * + * The driver tries to enable best-case scenario MSI-X vectors. If that doesn't + * succeed than adjust to irqs number returned by kernel. + * + * The fall-back logic is described below with each [#] represented needed irqs + * number for the step. If any of the steps is lower than received number, then + * return the number of MSI-X. If any of the steps is greater, then check next + * one. If received value is lower than irqs value in last step return error. + * + * Step [0]: Enable the best-case scenario MSI-X vectors. + * + * Step [1]: Enable MSI-X vectors with eswitch support disabled + * + * Step [2]: Enable MSI-X vectors with the number of vectors reserved for + * MACVLAN and Scalable IOV support reduced by a factor of 2. + * + * Step [3]: Enable MSI-X vectors with the number of vectors reserved for + * MACVLAN and Scalable IOV support reduced by a factor of 4. + * + * Step [4]: Enable MSI-X vectors with MACVLAN and Scalable IOV support + * disabled. + * + * Step [5]: Enable MSI-X vectors with the number of pf->num_lan_msix reduced + * by a factor of 2 from the previous step (i.e. num_online_cpus() / 2). + * Also, with the number of pf->num_rdma_msix reduced by a factor of ~2 from the + * previous step (i.e. 
num_online_cpus() / 2 + ICE_RDMA_NUM_AEQ_MSIX). + * + * Step [6]: Same as step [3], except reduce both by a factor of 4. + * + * Step [7]: Enable the bare-minimum MSI-X vectors. + * + * Each feature has separate table with needed irqs in each step. Sum of these + * tables is tracked in adj_vec to show needed irqs in each step. Separate + * tables are later use to set correct number of irqs for each feature based on + * choosed step. + */ +static int ice_ena_msix_range(struct ice_pf *pf) +{ +#define ICE_ADJ_VEC_STEPS 8 +#define ICE_ADJ_VEC_WORST_CASE 0 +#define ICE_ADJ_VEC_BEST_CASE (ICE_ADJ_VEC_STEPS - 1) + int num_cpus = num_online_cpus(); + int rdma_adj_vec[ICE_ADJ_VEC_STEPS] = { + ICE_MIN_RDMA_MSIX, + num_cpus / 4 > ICE_MIN_RDMA_MSIX ? + num_cpus / 4 + ICE_RDMA_NUM_AEQ_MSIX : + ICE_MIN_RDMA_MSIX, + num_cpus / 2 > ICE_MIN_RDMA_MSIX ? + num_cpus / 2 + ICE_RDMA_NUM_AEQ_MSIX : + ICE_MIN_RDMA_MSIX, + num_cpus > ICE_MIN_RDMA_MSIX ? + num_cpus + ICE_RDMA_NUM_AEQ_MSIX : ICE_MIN_RDMA_MSIX, + num_cpus > ICE_MIN_RDMA_MSIX ? + num_cpus + ICE_RDMA_NUM_AEQ_MSIX : ICE_MIN_RDMA_MSIX, + num_cpus > ICE_MIN_RDMA_MSIX ? + num_cpus + ICE_RDMA_NUM_AEQ_MSIX : ICE_MIN_RDMA_MSIX, + num_cpus > ICE_MIN_RDMA_MSIX ? + num_cpus + ICE_RDMA_NUM_AEQ_MSIX : ICE_MIN_RDMA_MSIX, + num_cpus > ICE_MIN_RDMA_MSIX ? + num_cpus + ICE_RDMA_NUM_AEQ_MSIX : ICE_MIN_RDMA_MSIX, + }; + int lan_adj_vec[ICE_ADJ_VEC_STEPS] = { + ICE_MIN_LAN_MSIX, + max_t(int, num_cpus / 4, ICE_MIN_LAN_MSIX), + max_t(int, num_cpus / 2, ICE_MIN_LAN_MSIX), + max_t(int, num_cpus, ICE_MIN_LAN_MSIX), + max_t(int, num_cpus, ICE_MIN_LAN_MSIX), + max_t(int, num_cpus, ICE_MIN_LAN_MSIX), + max_t(int, num_cpus, ICE_MIN_LAN_MSIX), + max_t(int, ice_adq_max_qps(pf), ICE_MIN_LAN_MSIX), + }; + int fdir_adj_vec[ICE_ADJ_VEC_STEPS] = { + ICE_FDIR_MSIX, ICE_FDIR_MSIX, ICE_FDIR_MSIX, + ICE_FDIR_MSIX, ICE_FDIR_MSIX, ICE_FDIR_MSIX, + ICE_FDIR_MSIX, ICE_FDIR_MSIX, + }; + int adj_vec[ICE_ADJ_VEC_STEPS] = { + ICE_OICR_MSIX, ICE_OICR_MSIX, ICE_OICR_MSIX, + ICE_OICR_MSIX, ICE_OICR_MSIX, ICE_OICR_MSIX, + ICE_OICR_MSIX, ICE_OICR_MSIX, + }; +#ifdef HAVE_NDO_DFWD_OPS + int macvlan_adj_vec[ICE_ADJ_VEC_STEPS] = { + 0, 0, 0, 0, + (ICE_MAX_MACVLANS * ICE_DFLT_VEC_VMDQ_VSI) / 4, + (ICE_MAX_MACVLANS * ICE_DFLT_VEC_VMDQ_VSI) / 2, + ICE_MAX_MACVLANS * ICE_DFLT_VEC_VMDQ_VSI, + ICE_MAX_MACVLANS * ICE_DFLT_VEC_VMDQ_VSI, + }; +#endif /* OFFLOAD_MACVLAN_SUPPORT */ + int eswitch_adj_vec[ICE_ADJ_VEC_STEPS] = { + 0, 0, 0, 0, 0, 0, 0, + ICE_ESWITCH_MSIX, + }; + int scalable_adj_vec[ICE_ADJ_VEC_STEPS] = { + 0, 0, 0, 0, + (ICE_MAX_SCALABLE * ICE_NUM_VF_MSIX_SMALL) / 4, + (ICE_MAX_SCALABLE * ICE_NUM_VF_MSIX_SMALL) / 2, + ICE_MAX_SCALABLE * ICE_NUM_VF_MSIX_SMALL, + ICE_MAX_SCALABLE * ICE_NUM_VF_MSIX_SMALL, + }; + struct device *dev = ice_pf_to_dev(pf); + int adj_step = ICE_ADJ_VEC_BEST_CASE; + int err = -ENOSPC; + int v_actual, i; + int needed = 0; + + needed += ICE_OICR_MSIX; + + needed += lan_adj_vec[ICE_ADJ_VEC_BEST_CASE]; + ice_adj_vec_sum(adj_vec, lan_adj_vec, ICE_ADJ_VEC_STEPS); + + if (test_bit(ICE_FLAG_ESWITCH_CAPABLE, pf->flags)) { + needed += eswitch_adj_vec[ICE_ADJ_VEC_BEST_CASE]; + ice_adj_vec_sum(adj_vec, eswitch_adj_vec, ICE_ADJ_VEC_STEPS); + } else { + ice_adj_vec_clear(eswitch_adj_vec, ICE_ADJ_VEC_STEPS); + } +#ifdef HAVE_NDO_DFWD_OPS + + if (test_bit(ICE_FLAG_VMDQ_ENA, pf->flags)) { + needed += macvlan_adj_vec[ICE_ADJ_VEC_BEST_CASE]; + ice_adj_vec_sum(adj_vec, macvlan_adj_vec, ICE_ADJ_VEC_STEPS); + } else { + ice_adj_vec_clear(macvlan_adj_vec, ICE_ADJ_VEC_STEPS); + } +#endif /* 
OFFLOAD_MACVLAN_SUPPORT */ + + if (ice_chk_rdma_cap(pf)) { + needed += rdma_adj_vec[ICE_ADJ_VEC_BEST_CASE]; + ice_adj_vec_sum(adj_vec, rdma_adj_vec, ICE_ADJ_VEC_STEPS); + } else { + ice_adj_vec_clear(rdma_adj_vec, ICE_ADJ_VEC_STEPS); + } + + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { + needed += fdir_adj_vec[ICE_ADJ_VEC_BEST_CASE]; + ice_adj_vec_sum(adj_vec, fdir_adj_vec, ICE_ADJ_VEC_STEPS); + } else { + ice_adj_vec_clear(fdir_adj_vec, ICE_ADJ_VEC_STEPS); + } + + if (test_bit(ICE_FLAG_SIOV_CAPABLE, pf->flags)) { + needed += scalable_adj_vec[ICE_ADJ_VEC_BEST_CASE]; + ice_adj_vec_sum(adj_vec, scalable_adj_vec, ICE_ADJ_VEC_STEPS); + } else { + ice_adj_vec_clear(scalable_adj_vec, ICE_ADJ_VEC_STEPS); + } + + v_actual = ice_ena_msix(pf, needed); + if (v_actual < 0) { + err = v_actual; + goto err; + } else if (v_actual < adj_vec[ICE_ADJ_VEC_WORST_CASE]) { + ice_dis_msix(pf); + goto err; + } + + for (i = ICE_ADJ_VEC_WORST_CASE + 1; i < ICE_ADJ_VEC_STEPS; i++) { + if (v_actual < adj_vec[i]) { + adj_step = i - 1; + break; + } + } + + pf->num_lan_msix = lan_adj_vec[adj_step]; + pf->num_rdma_msix = rdma_adj_vec[adj_step]; + if (test_bit(ICE_FLAG_ESWITCH_CAPABLE, pf->flags) && + !eswitch_adj_vec[adj_step]) { + dev_warn(dev, "Not enough MSI-X for eswitch support, disabling feature\n"); + clear_bit(ICE_FLAG_ESWITCH_CAPABLE, pf->flags); + } +#ifdef HAVE_NDO_DFWD_OPS + if (test_bit(ICE_FLAG_VMDQ_ENA, pf->flags) && + !macvlan_adj_vec[adj_step]) { + dev_warn(dev, "Not enough MSI-X for hardware MACVLAN support, disabling feature\n"); + clear_bit(ICE_FLAG_VMDQ_ENA, pf->flags); + } +#endif /* OFFLOAD_MACVLAN_SUPPORT */ + pf->max_adq_qps = lan_adj_vec[adj_step]; + if (test_bit(ICE_FLAG_SIOV_CAPABLE, pf->flags) && + !scalable_adj_vec[adj_step]) { + dev_warn(dev, "Not enough MSI-X for Scalable IOV support, disabling feature\n"); + clear_bit(ICE_FLAG_SIOV_CAPABLE, pf->flags); + } + return v_actual; + +err: + dev_err(dev, "Failed to enable MSI-X vectors\n"); + return err; +} + +/** + * ice_init_interrupt_scheme - Determine proper interrupt scheme + * @pf: board private structure to initialize + */ +int ice_init_interrupt_scheme(struct ice_pf *pf) +{ + int vectors = ice_ena_msix_range(pf); + + if (vectors < 0) + return vectors; + + /* pf->msix_entries is used in idc and needs to be filled on kernel + * with new irq alloc API + */ +#ifdef HAVE_PCI_ALLOC_IRQ + if (ice_alloc_and_fill_msix_entries(pf, vectors)) { + ice_dis_msix(pf); + return -ENOMEM; + } +#endif /* HAVE_PCI_ALLOC_IRQ */ + /* set up vector assignment tracking */ + pf->irq_tracker = + devm_kzalloc(ice_pf_to_dev(pf), + struct_size(pf->irq_tracker, list, vectors), + GFP_KERNEL); + if (!pf->irq_tracker) { + ice_dis_msix(pf); + return -ENOMEM; + } + + /* populate SW interrupts pool with number of OS granted IRQs. 
*/ + pf->num_avail_sw_msix = (u16)vectors; + pf->irq_tracker->num_entries = (u16)vectors; + pf->irq_tracker->end = pf->irq_tracker->num_entries; + + return 0; +} + +/** + * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme + * @pf: board private structure + */ +void ice_clear_interrupt_scheme(struct ice_pf *pf) +{ +#ifdef HAVE_PCI_ALLOC_IRQ + kfree(pf->msix_entries); + pf->msix_entries = NULL; + +#endif /* PEER_SUPPORT */ + ice_dis_msix(pf); + + if (pf->irq_tracker) { + devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); + pf->irq_tracker = NULL; + } +} + +/** + * ice_get_irq_num - get system irq number based on index from driver + * @pf: board private structure + * @idx: driver irq index + */ +int ice_get_irq_num(struct ice_pf *pf, int idx) +{ +#ifdef HAVE_PCI_ALLOC_IRQ + return pci_irq_vector(pf->pdev, idx); +#else + if (!pf->msix_entries) + return -EINVAL; + + return pf->msix_entries[idx].vector; +#endif /* HAVE_PCI_ALLOC_IRQ */ +} diff --git a/drivers/thirdparty/ice/ice_irq.h b/drivers/thirdparty/ice/ice_irq.h new file mode 100644 index 000000000000..644caa4090be --- /dev/null +++ b/drivers/thirdparty/ice/ice_irq.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_IRQ_H_ +#define _ICE_IRQ_H_ + +int ice_init_interrupt_scheme(struct ice_pf *pf); +void ice_clear_interrupt_scheme(struct ice_pf *pf); + +int ice_get_irq_num(struct ice_pf *pf, int idx); + +#endif diff --git a/drivers/thirdparty/ice/ice_lag.c b/drivers/thirdparty/ice/ice_lag.c index ef36be8fb867..c3a1397cac42 100644 --- a/drivers/thirdparty/ice/ice_lag.c +++ b/drivers/thirdparty/ice/ice_lag.c @@ -4,9 +4,496 @@ /* Link Aggregation code */ #include "ice.h" +#include "ice_lib.h" #ifdef HAVE_NETDEV_UPPER_INFO #include "ice_lag.h" +static DEFINE_IDA(ice_lag_ida); + +/** + * ice_lag_nop_handler - no-op Rx handler to disable LAG + * @pskb: pointer to skb pointer + */ +rx_handler_result_t ice_lag_nop_handler(struct sk_buff __always_unused **pskb) +{ + return RX_HANDLER_PASS; +} + +/** + * netif_is_same_ice - determine if netdev is on the same ice NIC as local PF + * @pf: local PF struct + * @netdev: netdev we are evaluating + */ +static bool netif_is_same_ice(struct ice_pf *pf, struct net_device *netdev) +{ + struct ice_netdev_priv *np; + struct ice_pf *test_pf; + struct ice_vsi *vsi; + + if (!netif_is_ice(netdev)) + return false; + + np = netdev_priv(netdev); + if (!np) + return false; + + vsi = np->vsi; + if (!vsi) + return false; + + test_pf = vsi->back; + if (!test_pf) + return false; + + if (pf->pdev->bus != test_pf->pdev->bus || + pf->pdev->slot != test_pf->pdev->slot) + return false; + + return true; +} + +/** + * ice_netdev_to_lag - return pointer to associated lag struct from netdev + * @netdev: pointer to net_device struct + */ +static struct ice_lag *ice_netdev_to_lag(struct net_device *netdev) +{ + struct ice_netdev_priv *np; + struct ice_vsi *vsi; + + if (!netif_is_ice(netdev)) + return NULL; + + np = netdev_priv(netdev); + if (!np) + return NULL; + + vsi = np->vsi; + if (!vsi) + return NULL; + + return vsi->back->lag; +} + +/** + * ice_lag_find_primary - return the lag struct for primary interface in a bond + * @lag: lag info struct + */ +struct ice_lag *ice_lag_find_primary(struct ice_lag *lag) +{ + struct ice_lag *primary_lag = NULL; + struct list_head *tmp; + + list_for_each(tmp, lag->netdev_head) { + struct ice_lag_netdev_list *entry; + struct ice_lag *tmp_lag; + + entry = list_entry(tmp, struct ice_lag_netdev_list, node); 
+ tmp_lag = ice_netdev_to_lag(entry->netdev); + if (tmp_lag && tmp_lag->primary) { + primary_lag = tmp_lag; + break; + } + } + + return primary_lag; +} + +/** + * ice_plug_aux_dev_lock - plug aux dev while handling lag mutex lock + * @cdev: pointer to struct for aux device + * @name: name of aux dev to use in plug call + * @lag: pointer to lag struct containing the mutex to unlock/lock + */ +static void ice_plug_aux_dev_lock(struct iidc_core_dev_info *cdev, + const char *name, struct ice_lag *lag) +{ + mutex_unlock(&lag->pf->lag_mutex); + ice_plug_aux_dev(cdev, name); + mutex_lock(&lag->pf->lag_mutex); +} + +/** + * ice_unplug_aux_dev_lock - unplug aux dev while handling lag mutex lock + * @cdev: pointer to struct for aux device + * @lag: pointer to lag struct containing the mutex to unlock/lock + */ +static void ice_unplug_aux_dev_lock(struct iidc_core_dev_info *cdev, + struct ice_lag *lag) +{ + mutex_unlock(&lag->pf->lag_mutex); + ice_unplug_aux_dev(cdev); + mutex_lock(&lag->pf->lag_mutex); +} + +#define ICE_LAG_NUM_RULES 0x1 +#define ICE_LAG_LA_VSI_S 3 +#define ICE_LAG_LA_VALID BIT(16) +#define ICE_LAG_RES_SUBSCRIBE BIT(15) +#define ICE_LAG_RES_SHARED BIT(14) + +/** + * ice_lag_add_lg_action - add a large action to redirect RDMA traffic + * @hw: pointer to the HW struct + * @lkup: recipe for lookup + * @rinfo: information related to rule that needs to be programmed + * @entry: return struct for recipe_id, rule_id and vsi_handle. + */ +static int +ice_lag_add_lg_action(struct ice_hw *hw, struct ice_adv_lkup_elem *lkup, + struct ice_adv_rule_info *rinfo, + struct ice_rule_query_data *entry) +{ + const struct ice_dummy_pkt_offsets *pkt_offsets; + struct ice_pf *pf = (struct ice_pf *)hw->back; + u16 rule_buf_sz, pkt_len, vsi_handle, rid = 0; + struct ice_aqc_sw_rules_elem *s_rule = NULL; + const u8 *pkt = NULL; + int ret = 0; + u32 act = 0; + + if (!entry) + return -EINVAL; + + if (entry->rid || entry->rule_id) { + dev_warn(ice_pf_to_dev(pf), "Error: Secondary interface already has filter defined\n"); + return -EINVAL; + } + if (!hw->switch_info->prof_res_bm_init) { + hw->switch_info->prof_res_bm_init = 1; + ice_init_prof_result_bm(hw); + } + + ice_find_dummy_packet(lkup, 1, rinfo->tun_type, &pkt, &pkt_len, + &pkt_offsets); + if (!pkt) { + dev_warn(&pf->pdev->dev, "Could not find dummy packet for LAG filter rule\n"); + return -EINVAL; + } + + vsi_handle = rinfo->sw_act.vsi_handle; + if (!ice_is_vsi_valid(hw, vsi_handle)) { + dev_warn(ice_pf_to_dev(pf), "VSI not valid for adding Lg Action\n"); + return -EINVAL; + } + + ret = ice_add_adv_recipe(hw, lkup, 1, rinfo, &rid); + if (ret) { + dev_warn(ice_pf_to_dev(pf), "Failed adding advance recipe\n"); + return ret; + } + + rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len; + s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); + if (!s_rule) + return -ENOMEM; + + act = (rinfo->lg_id << ICE_SINGLE_ACT_PTR_VAL_S) | ICE_SINGLE_ACT_PTR | + ICE_SINGLE_ACT_PTR_HAS_FWD | ICE_SINGLE_ACT_PTR_BIT | + ICE_SINGLE_ACT_LAN_ENABLE | ICE_SINGLE_ACT_LB_ENABLE; + + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(hw->port_info->lport); + s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid); + s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); + + ret = ice_fill_adv_dummy_packet(lkup, 1, s_rule, pkt, pkt_len, + pkt_offsets); + if (ret) { + dev_warn(ice_pf_to_dev(pf), "Could not file dummy packet for Lg Action\n"); + goto ice_lag_lg_act_err; + } + + ret = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, + 
rule_buf_sz, 1, ice_aqc_opc_add_sw_rules, NULL); + if (ret) { + dev_warn(ice_pf_to_dev(pf), "Fail adding switch rule for Lg Action\n"); + goto ice_lag_lg_act_err; + } + + entry->rid = rid; + entry->rule_id = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); + entry->vsi_handle = rinfo->sw_act.vsi_handle; + +ice_lag_lg_act_err: + kfree(s_rule); + return ret; +} + +/** + * ice_lag_add_prune_list - Add primary's VSI to secondary's prune list + * @lag: lag info struct + * @event_pf: PF struct for interface we are modifying prune list on + */ +static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf) +{ + u16 num_vsi, rule_buf_sz, vsi_list_id, prim_vsi_num, event_vsi_idx; + struct ice_aqc_sw_rules_elem *s_rule = NULL; + struct ice_sw_recipe *recp_list; + struct device *dev; + + num_vsi = 1; + + recp_list = &event_pf->hw.switch_info->recp_list[ICE_SW_LKUP_VLAN]; + dev = ice_pf_to_dev(lag->pf); + prim_vsi_num = lag->pf->vsi[0]->vsi_num; + event_vsi_idx = event_pf->vsi[0]->idx; + + if (!ice_find_vsi_list_entry(recp_list, event_vsi_idx, &vsi_list_id)) { + dev_dbg(dev, "Could not locate prune list when setting up RDMA on LAG\n"); + return; + } + + rule_buf_sz = ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); + s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); + if (!s_rule) + return; + + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_PRUNE_LIST_SET); + s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); + s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi); + s_rule->pdata.vsi_list.vsi[0] = cpu_to_le16(prim_vsi_num); + + if (ice_aq_sw_rules(&lag->pf->hw, (struct ice_aqc_sw_rules *)s_rule, + rule_buf_sz, 1, ice_aqc_opc_update_sw_rules, NULL)) + dev_warn(dev, "Error adding VSI prune list\n"); + kfree(s_rule); +} + +/** + * ice_lag_del_prune_list - Reset Secondary's prune list to just its own VSI + * @lag: local Secondary interface's ice_lag struct + * @event_pf: PF struct for unlinking interface + */ +static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf) +{ + u16 num_vsi, vsi_num, vsi_idx, rule_buf_sz, vsi_list_id; + struct ice_aqc_sw_rules_elem *s_rule = NULL; + struct ice_sw_recipe *recp_list; + struct device *dev; + + num_vsi = 1; + + recp_list = &event_pf->hw.switch_info->recp_list[ICE_SW_LKUP_VLAN]; + dev = ice_pf_to_dev(lag->pf); + vsi_num = lag->pf->vsi[0]->vsi_num; + vsi_idx = event_pf->vsi[0]->idx; + + if (!ice_find_vsi_list_entry(recp_list, vsi_idx, &vsi_list_id)) { + dev_dbg(dev, "Could not locate prune list when unwinding RDMA on LAG\n"); + return; + } + + rule_buf_sz = ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); + s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); + if (!s_rule) + return; + + rule_buf_sz = ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR); + s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); + s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi); + s_rule->pdata.vsi_list.vsi[0] = cpu_to_le16(vsi_num); + + if (ice_aq_sw_rules(&lag->pf->hw, (struct ice_aqc_sw_rules *)s_rule, + rule_buf_sz, 1, ice_aqc_opc_update_sw_rules, NULL)) + dev_warn(dev, "Error clearing VSI prune list\n"); + + kfree(s_rule); +} + +/** + * ice_lag_rdma_create_fltr - Create switch rule to redirect RoCEv2 traffic + * @lag: lag info struct + */ +static int ice_lag_rdma_create_fltr(struct ice_lag *lag) +{ + struct ice_aqc_alloc_free_res_elem *sw_buf; + struct ice_aqc_sw_rules_elem rule = { 0 }; + struct ice_aqc_res_elem *sw_ele; + struct ice_lag *primary_lag; + struct ice_vsi *primary_vsi; + struct ice_netdev_priv *np; + u16 
buf_len; + int ret = 0; + + if (!lag->primary) + primary_lag = ice_lag_find_primary(lag); + else + primary_lag = lag; + + if (!primary_lag) + return -EINVAL; + + np = netdev_priv(primary_lag->netdev); + primary_vsi = np->vsi; + + buf_len = ICE_SW_RULE_LG_ACT_SIZE(ICE_LAG_NUM_RULES); + sw_buf = kzalloc(buf_len, GFP_KERNEL); + if (!sw_buf) + return -ENOMEM; + + sw_buf->num_elems = cpu_to_le16(ICE_LAG_NUM_RULES); + sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_WIDE_TABLE_1 | + ICE_LAG_RES_SHARED); + if (lag->primary) { + u32 large_action = 0x0; + + dev_dbg(ice_pf_to_dev(lag->pf), "Configuring filter on Primary\n"); + /* Allocate a shared Large Action on primary interface + * This allows for the creation of a filter + * to direct traffic from one interface to another. + */ + ret = ice_aq_alloc_free_res(&lag->pf->hw, ICE_LAG_NUM_RULES, + sw_buf, buf_len, + ice_aqc_opc_alloc_res, NULL); + if (ret) { + dev_err(ice_pf_to_dev(lag->pf), + "Failed Allocating Lg Action item %d\n", ret); + goto create_fltr_out; + } + + sw_ele = &sw_buf->elem[0]; + lag->action_idx = le16_to_cpu(sw_ele->e.flu_resp); + + large_action |= (primary_vsi->vsi_num << ICE_LAG_LA_VSI_S) | + ICE_LAG_LA_VALID; + + /* Fill out add switch rule structure */ + rule.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT); + rule.pdata.lg_act.index = cpu_to_le16(lag->action_idx); + rule.pdata.lg_act.size = cpu_to_le16(ICE_LAG_NUM_RULES); + rule.pdata.lg_act.act[0] = cpu_to_le32(large_action); + + /* call add switch rule */ + ret = ice_aq_sw_rules(&lag->pf->hw, &rule, sizeof(rule), + ICE_LAG_NUM_RULES, + ice_aqc_opc_add_sw_rules, NULL); + if (ret) + dev_err(ice_pf_to_dev(lag->pf), + "Failed configuring shared Lg Action item %d\n", + ret); + } else { + struct ice_adv_rule_info rule_info = { 0 }; + struct ice_adv_lkup_elem *item; + + dev_dbg(ice_pf_to_dev(lag->pf), "Configuring filter on Secondary\n"); + sw_buf->res_type |= cpu_to_le16(ICE_LAG_RES_SUBSCRIBE); + sw_buf->elem[0].e.flu_resp = + cpu_to_le16(primary_lag->action_idx); + + /* Subscribe to shared large action on non-primary interface. + * This allows this PF to use shared item to direct RDMA + * traffic to another interface's resource. 
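The lookup element filled in just below matches the RoCEv2 UDP destination port: the raw memcpy of "\x12\xB7" writes 0x12B7 (4791, the IANA-assigned RoCEv2 port) in network byte order, and the 0xFF memset makes it an exact match. An equivalent, more explicit sketch (hypothetical helper; assumes dst_port is a __be16 as in struct ice_l4_hdr):

static void ice_lag_fill_rocev2_lkup(struct ice_adv_lkup_elem *item)
{
	item->type = ICE_UDP_ILOS;
	item->h_u.l4_hdr.dst_port = cpu_to_be16(4791);	/* 0x12B7, RoCEv2 */
	item->m_u.l4_hdr.dst_port = cpu_to_be16(0xFFFF);	/* exact match */
}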
+ */ + ret = ice_aq_alloc_free_res(&lag->pf->hw, ICE_LAG_NUM_RULES, + sw_buf, buf_len, + ice_aqc_opc_alloc_res, NULL); + if (ret) { + dev_err(ice_pf_to_dev(lag->pf), + "Failed subscribing to Lg Action item %d\n", + ret); + goto create_fltr_out; + } + + /* Add switch rule */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto create_fltr_out; + } + + memset(item, 0, sizeof(*item)); + item->type = ICE_UDP_ILOS; + memcpy(&item->h_u.l4_hdr.dst_port, "\x12\xB7", 2); + memset(&item->m_u.l4_hdr.dst_port, 0xFF, 2); + + rule_info.sw_act.src = lag->pf->hw.port_info->lport; + rule_info.sw_act.fltr_act = ICE_LG_ACTION; + rule_info.sw_act.vsi_handle = primary_vsi->idx; + rule_info.priority = 7; + rule_info.rx = 1; + rule_info.lg_id = primary_lag->action_idx; + rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; + + ret = ice_lag_add_lg_action(&lag->pf->hw, item, &rule_info, + &lag->fltr); + kfree(item); + } + +create_fltr_out: + kfree(sw_buf); + return ret; +} + +/** + * ice_lag_rdma_del_fltr - Delete switch rule filter for RoCEv2 traffic + * @lag: lag info struct + */ +static void ice_lag_rdma_del_fltr(struct ice_lag *lag) +{ + struct ice_rule_query_data *rm_entry = &lag->fltr; + struct ice_aqc_sw_rules_elem *s_rule; + struct ice_hw *hw = &lag->pf->hw; + u16 rule_buf_sz; + + rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; + s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); + if (!s_rule) + return; + + s_rule->pdata.lkup_tx_rx.act = 0; + s_rule->pdata.lkup_tx_rx.index = + cpu_to_le16(rm_entry->rule_id); + s_rule->pdata.lkup_tx_rx.hdr_len = 0; + if (ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, + rule_buf_sz, 1, + ice_aqc_opc_remove_sw_rules, NULL)) + dev_warn(ice_pf_to_dev(lag->pf), + "Failed to remove RDMA switch rule\n"); + + rm_entry->rid = 0; + rm_entry->rule_id = 0; + + kfree(s_rule); +} + +/** + * ice_lag_rdma_del_action - free / unsub large action + * @lag: LAG structure of the primary interface + */ +static void ice_lag_rdma_del_action(struct ice_lag *lag) +{ + struct ice_aqc_alloc_free_res_elem *sw_buf; + struct ice_lag *primary_lag; + u16 buf_len = 0x6; + int ret; + + if (lag->primary) + primary_lag = lag; + else + primary_lag = ice_lag_find_primary(lag); + + if (!primary_lag) + return; + + sw_buf = kzalloc(buf_len, GFP_KERNEL); + if (!sw_buf) + return; + + sw_buf->num_elems = cpu_to_le16(ICE_LAG_NUM_RULES); + sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_WIDE_TABLE_1); + sw_buf->elem[0].e.flu_resp = cpu_to_le16(primary_lag->action_idx); + + ret = ice_aq_alloc_free_res(&lag->pf->hw, ICE_LAG_NUM_RULES, + sw_buf, buf_len, ice_aqc_opc_free_res, + NULL); + if (ret) + dev_warn(ice_pf_to_dev(lag->pf), + "Error trying to delete/unsub from large action %d\n", + ret); +} + /** * ice_lag_set_primary - set PF LAG state as Primary * @lag: LAG info struct @@ -18,13 +505,20 @@ static void ice_lag_set_primary(struct ice_lag *lag) if (!pf) return; - if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_BACKUP) { - dev_warn(ice_pf_to_dev(pf), "%s: Attempt to be Primary, but incompatible state.\n", - netdev_name(lag->netdev)); + /* No previous primary interface */ + if (lag->role == ICE_LAG_UNSET) { + lag->role = ICE_LAG_PRIMARY; return; } - lag->role = ICE_LAG_PRIMARY; + /* Taking primary role from previous primary */ + if (lag->role == ICE_LAG_BACKUP) { + lag->role = ICE_LAG_PRIMARY; + return; + } + + dev_warn(ice_pf_to_dev(pf), "%s: Attempt to be Primary, but incompatible state. 
%d\n", + netdev_name(lag->netdev), lag->role); } /** @@ -38,13 +532,20 @@ static void ice_lag_set_backup(struct ice_lag *lag) if (!pf) return; - if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_PRIMARY) { - dev_dbg(ice_pf_to_dev(pf), "%s: Attempt to be Backup, but incompatible state\n", - netdev_name(lag->netdev)); + /* No previous backup interface */ + if (lag->role == ICE_LAG_UNSET) { + lag->role = ICE_LAG_BACKUP; return; } - lag->role = ICE_LAG_BACKUP; + /* Moving to backup from active role */ + if (lag->role == ICE_LAG_PRIMARY) { + lag->role = ICE_LAG_BACKUP; + return; + } + + dev_dbg(ice_pf_to_dev(pf), "%s: Attempt to be Backup, but incompatible state %d\n", + netdev_name(lag->netdev), lag->role); } /** @@ -53,13 +554,12 @@ static void ice_lag_set_backup(struct ice_lag *lag) */ static void ice_display_lag_info(struct ice_lag *lag) { - const char *name, *peer, *upper, *role, *bonded, *master; + const char *name, *upper, *role, *bonded, *primary; struct device *dev = &lag->pf->pdev->dev; name = lag->netdev ? netdev_name(lag->netdev) : "unset"; - peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset"; upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset"; - master = lag->master ? "TRUE" : "FALSE"; + primary = lag->primary ? "TRUE" : "FALSE"; bonded = lag->bonded ? "BONDED" : "UNBONDED"; switch (lag->role) { @@ -79,8 +579,111 @@ static void ice_display_lag_info(struct ice_lag *lag) role = "ERROR"; } - dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, master:%s\n", name, - bonded, peer, upper, role, master); + dev_dbg(dev, "%s %s, upper:%s, role:%s, primary:%s\n", name, + bonded, upper, role, primary); +} + +/** + * ice_is_bond_rdma_cap - check bond netdevs for RDMA compliance + * @lag: pointer to local lag struct + */ +static bool ice_is_bond_rdma_cap(struct ice_lag *lag) +{ + struct list_head *tmp; + + list_for_each(tmp, lag->netdev_head) { + struct ice_dcbx_cfg *dcb_cfg, *peer_dcb_cfg; + struct ice_lag_netdev_list *entry; + struct ice_netdev_priv *peer_np; + struct net_device *peer_netdev; + struct ice_vsi *vsi, *peer_vsi; + + entry = list_entry(tmp, struct ice_lag_netdev_list, node); + peer_netdev = entry->netdev; + /* non ice netdevs can't be used for RDMA */ + if (!netif_is_ice(peer_netdev)) { + netdev_info(lag->netdev, "Found non-ice netdev %s\n", + netdev_name(peer_netdev)); + return false; + } + + peer_np = netdev_priv(peer_netdev); + vsi = ice_get_main_vsi(lag->pf); + peer_vsi = peer_np->vsi; + + /* interfaces on different devices cannot be used for RDMA */ + if (lag->pf->pdev->bus != peer_vsi->back->pdev->bus || + lag->pf->pdev->slot != peer_vsi->back->pdev->slot) { + netdev_info(lag->netdev, "Found netdev %s on different device\n", + netdev_name(peer_netdev)); + return false; + } + + dcb_cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg; + peer_dcb_cfg = &peer_vsi->port_info->qos_cfg.local_dcbx_cfg; + + /* interfaces with different DCB config cannot be used for + * RDMA + */ + if (memcmp(dcb_cfg, peer_dcb_cfg, + sizeof(struct ice_dcbx_cfg))) { + netdev_info(lag->netdev, "Found netdev %s with different DCB config\n", + netdev_name(peer_netdev)); + return false; + } + } + + return true; +} + +/** + * ice_lag_chk_rdma - verify aggregate valid to support RDMA + * @lag: LAG struct for this interface + * @ptr: opaque data for netdev event info + */ +static void ice_lag_chk_rdma(struct ice_lag *lag, void *ptr) +{ + struct net_device *event_netdev, *event_upper; + struct netdev_notifier_bonding_info *info; + struct netdev_bonding_info *bonding_info; + struct 
iidc_core_dev_info *cdev; + + /* if we are not primary, or this event for a netdev not in our + * bond, then we don't need to evaluate. + */ + if (!lag->primary) + return; + + event_netdev = netdev_notifier_info_to_dev(ptr); + rcu_read_lock(); + event_upper = netdev_master_upper_dev_get_rcu(event_netdev); + rcu_read_unlock(); + if (event_upper != lag->upper_netdev) + return; + + info = (struct netdev_notifier_bonding_info *)ptr; + bonding_info = &info->bonding_info; + lag->bond_mode = bonding_info->master.bond_mode; + + cdev = ice_find_cdev_info_by_id(lag->pf, IIDC_RDMA_ID); + if (!cdev) + return; + + if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP || + cdev->rdma_protocol != IIDC_RDMA_PROTOCOL_ROCEV2) + goto unplug_out; + + if (!ice_is_bond_rdma_cap(lag)) + goto unplug_out; + + ice_set_rdma_cap(lag->pf); + ice_plug_aux_dev_lock(cdev, IIDC_RDMA_ROCE_NAME, lag); + + return; + +unplug_out: + ice_clear_rdma_cap(lag->pf); + ice_unplug_aux_dev_lock(cdev, lag); } /** @@ -92,12 +695,14 @@ static void ice_display_lag_info(struct ice_lag *lag) */ static void ice_lag_info_event(struct ice_lag *lag, void *ptr) { - struct net_device *event_netdev, *netdev_tmp; struct netdev_notifier_bonding_info *info; struct netdev_bonding_info *bonding_info; + struct net_device *event_netdev; const char *lag_netdev_name; event_netdev = netdev_notifier_info_to_dev(ptr); + if (!netif_is_ice(event_netdev)) + return; info = ptr; lag_netdev_name = netdev_name(lag->netdev); bonding_info = &info->bonding_info; @@ -115,19 +720,6 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr) goto lag_out; } - rcu_read_lock(); - for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) { - if (!netif_is_ice(netdev_tmp)) - continue; - - if (netdev_tmp && netdev_tmp != lag->netdev && - lag->peer_netdev != netdev_tmp) { - dev_hold(netdev_tmp); - lag->peer_netdev = netdev_tmp; - } - } - rcu_read_unlock(); - if (bonding_info->slave.state) ice_lag_set_backup(lag); else @@ -137,87 +729,378 @@ lag_out: ice_display_lag_info(lag); } +/** + * ice_lag_move_node - move scheduling node for RDMA LAG failover + * @lag: lag info struct + * @oldport: number of previous active port + * @newport: number of new active port + * @tc: traffic class of the qset node to move + */ +static int ice_lag_move_node(struct ice_lag *lag, u8 oldport, u8 newport, u8 tc) +{ + struct ice_hw *old_hw = NULL, *new_hw = NULL; + u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = {}; + struct ice_aqc_move_rdma_qset_buffer *buf; + struct ice_sched_node *node, *new_parent; + struct ice_aqc_move_rdma_qset_cmd *cmd; + struct ice_vsi *new_vsi = NULL; + struct ice_aq_desc desc; + struct list_head *tmp; + int err; + + max_rdmaqs[tc]++; + + /* locate the HW struct for old and new ports */ + list_for_each(tmp, lag->netdev_head) { + struct ice_lag_netdev_list *entry; + struct net_device *tmp_netdev; + struct ice_netdev_priv *np; + struct ice_vsi *vsi; + struct ice_hw *hw; + + entry = list_entry(tmp, struct ice_lag_netdev_list, node); + tmp_netdev = entry->netdev; + if (!tmp_netdev) + continue; + + np = netdev_priv(tmp_netdev); + if (!np) + continue; + + vsi = np->vsi; + if (!vsi) + continue; + + hw = &vsi->back->hw; + if (hw->port_info->lport == oldport) { + old_hw = hw; + continue; + } + + if (hw->port_info->lport == newport) { + new_vsi = vsi; + new_hw = hw; + } + } + + if (!old_hw || !new_hw || !new_vsi) { + dev_warn(ice_pf_to_dev(lag->pf), + "Could not locate resources to move node\n"); + return -EINVAL; + } + + node = ice_sched_find_node_by_teid(old_hw->port_info->root, + 
lag->rdma_qset[tc].teid); + if (!node) { + dev_dbg(ice_pf_to_dev(lag->pf), + "did not find teid %d in old port, checking new\n", + lag->rdma_qset[tc].teid); + node = ice_sched_find_node_by_teid(new_hw->port_info->root, + lag->rdma_qset[tc].teid); + if (!node) { + dev_warn(ice_pf_to_dev(lag->pf), + "Failed to find TEID %d to move for TC %d\n", + lag->rdma_qset[tc].teid, tc); + return -EINVAL; + } + } + + cmd = &desc.params.move_rdma_qset; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_rdma_qset); + + cmd->num_rdma_qset = 1; + cmd->flags = ICE_AQC_PF_MODE_KEEP_OWNERSHIP; + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + buf = kzalloc(ICE_LAG_SINGLE_FILTER_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + err = ice_cfg_vsi_rdma(new_hw->port_info, new_vsi->idx, + new_vsi->tc_cfg.ena_tc, max_rdmaqs); + if (err) { + dev_warn(ice_pf_to_dev(lag->pf), "Failed configuring port RDMA\n"); + goto node_move_err; + } + + new_parent = ice_sched_get_free_qparent(new_hw->port_info, new_vsi->idx, + tc, ICE_SCHED_NODE_OWNER_RDMA); + if (!new_parent) { + dev_warn(ice_pf_to_dev(lag->pf), "Could not find free qparent\n"); + err = -EINVAL; + goto node_move_err; + } + + buf->src_parent_teid = node->info.parent_teid; + buf->dest_parent_teid = new_parent->info.node_teid; + buf->descs[0].qset_teid = cpu_to_le16(lag->rdma_qset[tc].teid); + buf->descs[0].tx_qset_id = cpu_to_le16(lag->rdma_qset[tc].qs_handle); + + err = ice_aq_send_cmd(&lag->pf->hw, &desc, buf, + ICE_LAG_SINGLE_FILTER_SIZE, NULL); + if (!err) + node->info.parent_teid = new_parent->info.node_teid; + +node_move_err: + kfree(buf); + return err; +} + +/** + * ice_lag_move_nodes - move scheduling nodes for RDMA LAG failover + * @lag: lag info struct + * @oldport: number of previous active port + * @newport: number of new active port + */ +static void ice_lag_move_nodes(struct ice_lag *lag, u8 oldport, u8 newport) +{ + int err; + u8 i; + + ice_for_each_traffic_class(i) + if (lag->rdma_qset[i].teid) { + err = ice_lag_move_node(lag, oldport, newport, i); + if (err) + dev_err(&lag->pf->pdev->dev, "Error moving qset for TC %d: %d\n", + i, err); + } +} + +/** + * ice_lag_reclaim_node - reclaim node for specific TC back to original owner + * @lag: ice_lag struct for primary interface + * @active_hw: ice_hw struct for the currently active interface + * @tc: which TC to reclaim qset node for + */ +static int +ice_lag_reclaim_node(struct ice_lag *lag, struct ice_hw *active_hw, u8 tc) +{ + struct ice_aqc_move_rdma_qset_buffer *buf; + struct ice_sched_node *node, *new_parent; + struct ice_aqc_move_rdma_qset_cmd *cmd; + struct ice_aq_desc desc; + struct ice_hw *prim_hw; + + prim_hw = &lag->pf->hw; + node = ice_sched_find_node_by_teid(prim_hw->port_info->root, + lag->rdma_qset[tc].teid); + if (!node) { + dev_warn(ice_pf_to_dev(lag->pf), "Cannot find node to reclaim for TC %d\n", + tc); + return -EINVAL; + } + + cmd = &desc.params.move_rdma_qset; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_rdma_qset); + + cmd->num_rdma_qset = 1; + cmd->flags = ICE_AQC_PF_MODE_KEEP_OWNERSHIP; + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + new_parent = ice_sched_get_free_qparent(prim_hw->port_info, + lag->pf->vsi[0]->idx, tc, + ICE_SCHED_NODE_OWNER_RDMA); + if (!new_parent) { + dev_warn(ice_pf_to_dev(lag->pf), "Could not find free qparent\n"); + return -EINVAL; + } + + buf = kzalloc(ICE_LAG_SINGLE_FILTER_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf->src_parent_teid = node->info.parent_teid; + buf->dest_parent_teid = new_parent->info.node_teid; + 
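The ICE_LAG_SINGLE_FILTER_SIZE (0xC) buffer used for these qset moves appears to pack the two 32-bit parent TEIDs plus one descriptor (16-bit qset TEID and 16-bit Tx qset ID), i.e. 4 + 4 + 2 + 2 = 12 bytes. If the target kernels all provide static_assert(), a sanity check along these lines would document that assumption (sketch only, not part of the patch):

/* two __le32 parent TEIDs + one 4-byte qset descriptor */
static_assert(ICE_LAG_SINGLE_FILTER_SIZE == 12,
	      "buffer sized for exactly one qset move descriptor");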
buf->descs[0].qset_teid = cpu_to_le16(lag->rdma_qset[tc].teid); + buf->descs[0].tx_qset_id = cpu_to_le16(lag->rdma_qset[tc].qs_handle); + + if (!ice_aq_send_cmd(&lag->pf->hw, &desc, buf, + ICE_LAG_SINGLE_FILTER_SIZE, NULL)) + node->info.parent_teid = new_parent->info.node_teid; + + kfree(buf); + return 0; +} + +/** + * ice_lag_reclaim_nodes - helper function to reclaim nodes back to originator + * @lag: ice_lag struct for primary interface + * @active_hw: ice_hw struct for the currently active interface + */ +static void ice_lag_reclaim_nodes(struct ice_lag *lag, struct ice_hw *active_hw) +{ + u8 tc; + + ice_for_each_traffic_class(tc) + if (lag->rdma_qset[tc].teid) { + if (ice_lag_reclaim_node(lag, active_hw, tc)) + dev_err(ice_pf_to_dev(lag->pf), "Error reclaiming qset for TC %d\n", + tc); + } +} + +/** + * ice_lag_move_node_sync - move RDMA nodes out of sync with bonding events + * @old_hw: HW struct where the node currently resides + * @new_hw: HW struct where node is moving to + * @new_vsi: new vsi that will be parent to node + * @qset: params of the qset that is moving + * + * When qsets are allocated or freed on a bonded interface by the RDMA aux + * driver making calls into the IDC interface, depending on the state of that + * aggregate, it might be necessary to move the scheduleing nodes for that + * qset to a different interfaces tree. This happens without the advent of a + * netdev bonding info event. ice_lag_move_node_sync will handle that case. + */ +int ice_lag_move_node_sync(struct ice_hw *old_hw, struct ice_hw *new_hw, + struct ice_vsi *new_vsi, + struct iidc_rdma_qset_params *qset) +{ + u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = {}; + struct ice_aqc_move_rdma_qset_buffer *buf; + struct ice_sched_node *node, *new_parent; + struct ice_aqc_move_rdma_qset_cmd *cmd; + struct ice_aq_desc desc; + struct ice_hw *prim_hw; + struct ice_pf *old_pf; + int ret = 0; + + max_rdmaqs[qset->tc]++; + + node = ice_sched_find_node_by_teid(old_hw->port_info->root, qset->teid); + if (!node) { + node = ice_sched_find_node_by_teid(new_hw->port_info->root, + qset->teid); + if (!node) + return -ENOMEM; + } + + cmd = &desc.params.move_rdma_qset; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_rdma_qset); + cmd->num_rdma_qset = 1; + cmd->flags = ICE_AQC_PF_MODE_KEEP_OWNERSHIP; + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + buf = kzalloc(ICE_LAG_SINGLE_FILTER_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ice_cfg_vsi_rdma(new_hw->port_info, new_vsi->idx, + new_vsi->tc_cfg.ena_tc, max_rdmaqs); + + new_parent = ice_sched_get_free_qparent(new_hw->port_info, new_vsi->idx, + qset->tc, + ICE_SCHED_NODE_OWNER_RDMA); + if (!new_parent) { + ret = -ENOMEM; + goto node_sync_out; + } + + old_pf = old_hw->back; + if (old_pf->lag->primary) + prim_hw = old_hw; + else + prim_hw = new_hw; + + buf->src_parent_teid = node->info.parent_teid; + buf->dest_parent_teid = new_parent->info.node_teid; + buf->descs[0].qset_teid = cpu_to_le16(qset->teid); + buf->descs[0].tx_qset_id = cpu_to_le16(qset->qs_handle); + ice_aq_send_cmd(prim_hw, &desc, buf, ICE_LAG_SINGLE_FILTER_SIZE, NULL); + node->info.parent_teid = new_parent->info.node_teid; + +node_sync_out: + kfree(buf); + return ret; +} + /** * ice_lag_link - handle LAG link event * @lag: LAG info struct - * @info: info from the netdev notifier */ -static void -ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info) +static void ice_lag_link(struct ice_lag *lag) { - struct net_device *netdev_tmp, *upper = info->upper_dev; + struct 
iidc_core_dev_info *cdev; struct ice_pf *pf = lag->pf; - int peers = 0; - if (lag->bonded) dev_warn(ice_pf_to_dev(pf), "%s Already part of a bond\n", netdev_name(lag->netdev)); - rcu_read_lock(); - for_each_netdev_in_bond_rcu(upper, netdev_tmp) - peers++; - rcu_read_unlock(); - - if (lag->upper_netdev != upper) { - dev_hold(upper); - lag->upper_netdev = upper; - } + ice_clear_sriov_cap(pf); + cdev = ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID); + if (cdev && lag->primary) + cdev->rdma_active_port = lag->pf->hw.port_info->lport; + ice_clear_rdma_cap(pf); + ice_unplug_aux_dev_lock(cdev, lag); lag->bonded = true; lag->role = ICE_LAG_UNSET; - - /* if this is the first element in an LAG mark as master */ - lag->master = !!(peers == 1); } /** * ice_lag_unlink - handle unlink event * @lag: LAG info struct - * @info: info from netdev notification */ -static void -ice_lag_unlink(struct ice_lag *lag, - struct netdev_notifier_changeupper_info *info) +static void ice_lag_unlink(struct ice_lag *lag) { - struct net_device *netdev_tmp, *upper = info->upper_dev; - bool found = false; + struct iidc_core_dev_info *cdev; + struct ice_pf *pf = lag->pf; if (!lag->bonded) { netdev_dbg(lag->netdev, "bonding unlink event on non-LAG netdev\n"); return; } - /* determine if we are in the new LAG config or not */ - rcu_read_lock(); - for_each_netdev_in_bond_rcu(upper, netdev_tmp) { - if (netdev_tmp == lag->netdev) { - found = true; - break; + /* Unplug aux dev from aggregate interface if primary*/ + if (lag->primary) { + lag->primary = false; + cdev = ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID); + if (cdev) { + ice_unplug_aux_dev_lock(cdev, lag); + ice_clear_rdma_cap(pf); + cdev->rdma_active_port = ICE_LAG_INVALID_PORT; } - } - rcu_read_unlock(); + } else { + struct ice_lag *primary_lag; - if (found) - return; + primary_lag = ice_lag_find_primary(lag); + if (primary_lag) { + u8 pri_port, act_port, loc_port; - if (lag->upper_netdev) { - dev_put(lag->upper_netdev); - lag->upper_netdev = NULL; - } - - if (lag->peer_netdev) { - dev_put(lag->peer_netdev); - lag->peer_netdev = NULL; + cdev = ice_find_cdev_info_by_id(primary_lag->pf, + IIDC_RDMA_ID); + if (cdev) { + act_port = cdev->rdma_active_port; + pri_port = primary_lag->pf->hw.port_info->lport; + loc_port = pf->hw.port_info->lport; + if (act_port == loc_port) + ice_lag_move_nodes(primary_lag, + loc_port, pri_port); + } + } } lag->bonded = false; lag->role = ICE_LAG_NONE; + lag->upper_netdev = NULL; + ice_set_sriov_cap(pf); + ice_set_rdma_cap(pf); + cdev = ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID); + if (cdev) { + const char *name; + + if (cdev->rdma_protocol == IIDC_RDMA_PROTOCOL_IWARP) + name = IIDC_RDMA_IWARP_NAME; + else + name = IIDC_RDMA_ROCE_NAME; + ice_plug_aux_dev_lock(cdev, name, lag); + } } /** * ice_lag_changeupper_event - handle LAG changeupper event - * @lag: LAG info struct + * @lag: lag info struct * @ptr: opaque pointer data * * ptr is to be cast into netdev_notifier_changeupper_info @@ -234,47 +1117,416 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr) if (netdev != lag->netdev) return; - if (!info->upper_dev) { - netdev_dbg(netdev, "changeupper rcvd, but no upper defined\n"); - return; + if (info->linking) { + struct ice_lag *primary_lag; + + lag->upper_netdev = info->upper_dev; + /* If there is not already a primary interface in the LAG, + * then mark this one as primary. 
+ * In the case RDMA is supported, this will be the only PCI + * device that will initiate communication and supply resource + * for the RDMA auxiliary driver + */ + primary_lag = ice_lag_find_primary(lag); + if (primary_lag) { + lag->bond_id = primary_lag->bond_id; + if (netif_is_same_ice(primary_lag->pf, netdev)) + if (ice_lag_rdma_create_fltr(lag)) + netdev_warn(lag->netdev, "Error creating RoCEv2 filter\n"); + + } else { + lag->bond_id = ida_alloc(&ice_lag_ida, GFP_KERNEL); + lag->primary = true; + lag->rdma_vsi = lag->pf->vsi[0]; + if (ice_lag_rdma_create_fltr(lag)) + netdev_warn(lag->netdev, "Error creating RoCEv2 filter\n"); + } + + ice_lag_link(lag); + } else { + if (!lag->primary) { + lag->bond_id = -1; + ice_lag_rdma_del_fltr(lag); + } else { + ida_simple_remove(&ice_lag_ida, lag->bond_id); + lag->bond_id = -1; + lag->bond_mode = -1; + } + + ice_lag_rdma_del_action(lag); + ice_lag_unlink(lag); } - netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK"); - - if (!netif_is_lag_master(info->upper_dev)) { - netdev_dbg(netdev, "changeupper rcvd, but not master. bail\n"); - return; - } - - if (info->linking) - ice_lag_link(lag, info); - else - ice_lag_unlink(lag, info); - ice_display_lag_info(lag); } /** - * ice_lag_changelower_event - handle LAG changelower event - * @lag: LAG info struct - * @ptr: opaque data pointer - * - * ptr to be cast to netdev_notifier_changelowerstate_info + * ice_lag_chk_unlink - checks bond for RDMA compliance when netdev leaves + * @lag: local lag struct + * @ptr: opaque pointer data */ -static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr) +static void ice_lag_chk_unlink(struct ice_lag *lag, void *ptr) { - struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info; + struct iidc_core_dev_info *cdev; - if (netdev != lag->netdev) + info = (struct netdev_notifier_changeupper_info *)ptr; + if (!lag->primary || info->linking || + info->upper_dev != lag->upper_netdev) return; - netdev_dbg(netdev, "bonding info\n"); + cdev = ice_find_cdev_info_by_id(lag->pf, IIDC_RDMA_ID); + if (!cdev) + return; - if (!netif_is_lag_port(netdev)) { - netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. Bail\n"); + if (lag->bond_mode == BOND_MODE_ACTIVEBACKUP && + ice_is_bond_rdma_cap(lag) && + cdev->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2) { + ice_set_rdma_cap(lag->pf); + ice_plug_aux_dev_lock(cdev, IIDC_RDMA_ROCE_NAME, lag); + } +} + +/** + * ice_lag_monitor_link - main PF detect if nodes need to move on unlink + * @lag: lag info struct + * @ptr: opaque data containing notifier event + * + * This function is for the primary interface to monitor interfaces leaving the + * aggregate, and if they own scheduling nodes to move them back to the primary. + * Also maintain the prune lists for interfaces entering or leaving the + * aggregate. 
+ */ +static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr) +{ + struct ice_hw *prim_hw, *event_hw, *active_hw = NULL; + struct netdev_notifier_changeupper_info *info; + struct ice_netdev_priv *event_np; + struct iidc_core_dev_info *cdev; + struct net_device *event_netdev; + u8 event_port, prim_port; + struct iidc_event *event; + struct ice_pf *event_pf; + + if (!lag->primary) + return; + + event_netdev = netdev_notifier_info_to_dev(ptr); + /* only ice interfaces should be considered for this function */ + if (!netif_is_ice(event_netdev)) + return; + + event_np = netdev_priv(event_netdev); + event_pf = event_np->vsi->back; + event_hw = &event_pf->hw; + event_port = event_hw->port_info->lport; + prim_hw = &lag->pf->hw; + prim_port = prim_hw->port_info->lport; + + info = (struct netdev_notifier_changeupper_info *)ptr; + if (info->linking) { + struct net_device *event_upper; + + /* If linking port is not the primary, then we need + * to add the primary's VSI to linking ports prune + * list + */ + rcu_read_lock(); + event_upper = netdev_master_upper_dev_get_rcu(event_netdev); + rcu_read_unlock(); + if (prim_port != event_port && event_upper == lag->upper_netdev) + ice_lag_add_prune_list(lag, event_pf); + } else { + if (prim_port != event_port) { + /* If un-linking port is not the primary, then we need + * to remove the primary's VSI from un-linking ports + * prune list + */ + ice_lag_del_prune_list(lag, event_pf); + } else { + struct list_head *tmp; + + /* Primary VSI leaving bond, need to remove its + * VSI from all remaining interfaces prune lists + */ + list_for_each(tmp, lag->netdev_head) { + struct ice_lag_netdev_list *entry; + struct net_device *nd; + + entry = list_entry(tmp, + struct ice_lag_netdev_list, + node); + nd = entry->netdev; + + if (!netif_is_ice(nd)) + continue; + + if (nd && nd != lag->netdev) { + struct ice_netdev_priv *np; + struct ice_vsi *vsi; + struct ice_pf *pf; + + np = netdev_priv(nd); + if (!np) + continue; + vsi = np->vsi; + if (!vsi) + continue; + pf = vsi->back; + if (pf && pf->lag) { + ice_lag_del_prune_list(lag, pf); + pf->lag->bond_id = -1; + } + } + } + } + } + + /* End of linking functionality */ + if (info->linking || !ice_chk_rdma_cap(lag->pf)) + return; + + cdev = ice_find_cdev_info_by_id(lag->pf, IIDC_RDMA_ID); + if (!cdev) + return; + + if ((cdev->rdma_active_port != event_port && + prim_port != event_port) || + (cdev->rdma_active_port == event_port && + prim_port == event_port)) + return; + + /* non-primary active port or primary non-active has left the + * aggregate. Need to perform early failover and move nodes back + * to primary port. This will allow us to either continue RDMA + * communication on the primary port or cease RDMA communication + * cleanly if the primary port has left the aggregate. 
+ */ + if (event_port == prim_port) { + struct list_head *tmp; + + list_for_each(tmp, lag->netdev_head) { + struct ice_lag_netdev_list *entry; + struct ice_netdev_priv *active_np; + struct net_device *tmp_netdev; + struct ice_vsi *active_vsi; + + entry = list_entry(tmp, struct ice_lag_netdev_list, + node); + tmp_netdev = entry->netdev; + if (!tmp_netdev) + continue; + + active_np = netdev_priv(tmp_netdev); + if (!active_np) + continue; + + active_vsi = active_np->vsi; + if (!active_vsi) + continue; + + if (active_vsi->back->hw.port_info->lport == + cdev->rdma_active_port) { + active_hw = &active_vsi->back->hw; + break; + } + } + } else { + active_hw = event_hw; + } + if (!active_hw) { + dev_warn(ice_pf_to_dev(lag->pf), "Could not find Active Port HW struct\n"); return; } + if (!cdev->adev) + return; + + device_lock(&cdev->adev->dev); + event = kzalloc(sizeof(*event), GFP_ATOMIC); + if (event) { + set_bit(IIDC_EVENT_FAILOVER_START, event->type); + ice_send_event_to_aux_no_lock(cdev, event); + } + + dev_warn(ice_pf_to_dev(lag->pf), "Moving nodes from %d to %d\n", + cdev->rdma_active_port, prim_port); + ice_lag_reclaim_nodes(lag, active_hw); + + cdev->rdma_active_port = prim_port; + + if (event) { + clear_bit(IIDC_EVENT_FAILOVER_START, event->type); + set_bit(IIDC_EVENT_FAILOVER_FINISH, event->type); + ice_send_event_to_aux_no_lock(cdev, event); + kfree(event); + } + device_unlock(&cdev->adev->dev); +} + +/** + * ice_lag_monitor_active - main PF keep track of which port is active + * @lag: lag info struct + * @ptr: opaque data containing notifier event + * + * This function is for the primary PF to monitor changes in which port is + * active and handle changes for RDMA functionality + */ +static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr) +{ + struct net_device *event_netdev, *event_upper; + struct netdev_notifier_bonding_info *info; + struct netdev_bonding_info *bonding_info; + struct ice_netdev_priv *event_np; + struct iidc_core_dev_info *cdev; + u8 prim_port, event_port; + struct ice_pf *event_pf; + + if (!lag->primary) + return; + cdev = ice_find_cdev_info_by_id(lag->pf, IIDC_RDMA_ID); + if (!cdev) + return; + + event_netdev = netdev_notifier_info_to_dev(ptr); + rcu_read_lock(); + event_upper = netdev_master_upper_dev_get_rcu(event_netdev); + rcu_read_unlock(); + if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev) + return; + event_np = netdev_priv(event_netdev); + event_pf = event_np->vsi->back; + event_port = event_pf->hw.port_info->lport; + prim_port = lag->pf->hw.port_info->lport; + + info = (struct netdev_notifier_bonding_info *)ptr; + bonding_info = &info->bonding_info; + + /* first time setting active port for this aggregate */ + if (cdev->rdma_active_port == ICE_LAG_INVALID_PORT && + !bonding_info->slave.state) { + cdev->rdma_active_port = event_port; + if (prim_port != event_port) { + struct iidc_event *event; + + if (!cdev->adev) + return; + + device_lock(&cdev->adev->dev); + /* start failover process for RDMA */ + event = kzalloc(sizeof(*event), GFP_ATOMIC); + if (event) { + set_bit(IIDC_EVENT_FAILOVER_START, + event->type); + ice_send_event_to_aux_no_lock(cdev, event); + } + + dev_dbg(ice_pf_to_dev(lag->pf), "Moving nodes from %d to %d\n", + prim_port, event_port); + ice_lag_move_nodes(lag, prim_port, event_port); + + if (event) { + clear_bit(IIDC_EVENT_FAILOVER_START, + event->type); + set_bit(IIDC_EVENT_FAILOVER_FINISH, + event->type); + ice_send_event_to_aux_no_lock(cdev, event); + kfree(event); + } + device_unlock(&cdev->adev->dev); + } + 
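This notify/move/notify sequence (FAILOVER_START event, scheduling-node move, FAILOVER_FINISH event, all under the aux device lock) now occurs three times in this file. If it were factored out, a consolidated helper might look like the sketch below (hypothetical function built only from calls already used in this patch):

static void
ice_lag_failover_nodes(struct ice_lag *lag, struct iidc_core_dev_info *cdev,
		       u8 oldport, u8 newport)
{
	struct iidc_event *event;

	if (!cdev->adev)
		return;

	device_lock(&cdev->adev->dev);
	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (event) {
		set_bit(IIDC_EVENT_FAILOVER_START, event->type);
		ice_send_event_to_aux_no_lock(cdev, event);
	}

	/* move the RDMA qset scheduling nodes to the new active port */
	ice_lag_move_nodes(lag, oldport, newport);

	if (event) {
		clear_bit(IIDC_EVENT_FAILOVER_START, event->type);
		set_bit(IIDC_EVENT_FAILOVER_FINISH, event->type);
		ice_send_event_to_aux_no_lock(cdev, event);
		kfree(event);
	}
	device_unlock(&cdev->adev->dev);
}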
return; + } + + /* new active port */ + if (!bonding_info->slave.state && + cdev->rdma_active_port != event_port) { + struct iidc_event *event; + + if (!cdev->adev) + return; + device_lock(&cdev->adev->dev); + /* start failover process for RDMA */ + event = kzalloc(sizeof(*event), GFP_ATOMIC); + if (event) { + set_bit(IIDC_EVENT_FAILOVER_START, event->type); + ice_send_event_to_aux_no_lock(cdev, event); + } + + dev_dbg(ice_pf_to_dev(lag->pf), "Moving nodes from %d to %d\n", + cdev->rdma_active_port, event_port); + ice_lag_move_nodes(lag, cdev->rdma_active_port, event_port); + cdev->rdma_active_port = event_port; + + if (event) { + clear_bit(IIDC_EVENT_FAILOVER_START, event->type); + set_bit(IIDC_EVENT_FAILOVER_FINISH, event->type); + ice_send_event_to_aux_no_lock(cdev, event); + kfree(event); + } + device_unlock(&cdev->adev->dev); + } +} + +/** + * ice_lag_process_event - process a task assigned to the lag_wq + * @work: pointer to work_struct + */ +static void ice_lag_process_event(struct work_struct *work) +{ + struct ice_lag_work *lag_work; + struct net_device *netdev; + struct list_head *tmp, *n; + + lag_work = container_of(work, struct ice_lag_work, lag_task); + + mutex_lock(&lag_work->lag->pf->lag_mutex); + + lag_work->lag->netdev_head = &lag_work->netdev_list.node; + + switch (lag_work->event) { + case NETDEV_CHANGEUPPER: + if (ice_is_feature_supported(lag_work->lag->pf, ICE_F_LAG)) { + ice_lag_monitor_link(lag_work->lag, + &lag_work->info.changeupper_info); + ice_lag_chk_unlink(lag_work->lag, + &lag_work->info.changeupper_info); + } + ice_lag_changeupper_event(lag_work->lag, + &lag_work->info.changeupper_info); + break; + case NETDEV_BONDING_INFO: + if (ice_is_feature_supported(lag_work->lag->pf, ICE_F_LAG)) { + ice_lag_monitor_active(lag_work->lag, + &lag_work->info.bonding_info); + ice_lag_chk_rdma(lag_work->lag, + &lag_work->info.bonding_info); + } + ice_lag_info_event(lag_work->lag, &lag_work->info.bonding_info); + break; + case NETDEV_UNREGISTER: + netdev = lag_work->info.bonding_info.info.dev; + if (netdev == lag_work->lag->netdev && lag_work->lag->bonded && + netdev_unregistering(lag_work->lag->upper_netdev)) + ice_lag_unlink(lag_work->lag); + break; + default: + break; + } + + /* cleanup resources allocated for this work item */ + list_for_each_safe(tmp, n, &lag_work->netdev_list.node) { + struct ice_lag_netdev_list *entry; + + entry = list_entry(tmp, struct ice_lag_netdev_list, node); + list_del(&entry->node); + kfree(entry); + } + lag_work->lag->netdev_head = NULL; + + mutex_unlock(&lag_work->lag->pf->lag_mutex); + + kfree(work); } /** @@ -288,8 +1540,17 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct net_device *upper_netdev; + struct ice_lag_work *lag_work; struct ice_lag *lag; + if (event != NETDEV_CHANGEUPPER && event != NETDEV_BONDING_INFO && + event != NETDEV_UNREGISTER) + return NOTIFY_DONE; + + if (!(netdev->priv_flags & IFF_BONDING)) + return NOTIFY_DONE; + lag = container_of(notif_blk, struct ice_lag, notif_block); if (!lag->netdev) @@ -299,26 +1560,49 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event, if (!net_eq(dev_net(netdev), &init_net)) return NOTIFY_DONE; - switch (event) { - case NETDEV_CHANGEUPPER: - ice_lag_changeupper_event(lag, ptr); - break; - case NETDEV_CHANGELOWERSTATE: - ice_lag_changelower_event(lag, ptr); - break; - case NETDEV_BONDING_INFO: - ice_lag_info_event(lag, ptr); - break; - default: - break; 
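The work item in ice_lag_process_event() runs with pf->lag_mutex held for the whole event; this is presumably why ice_plug_aux_dev_lock() and ice_unplug_aux_dev_lock() earlier in this file drop and re-take that mutex around the aux plug/unplug calls. Likewise, the notifier below snapshots the bond's member netdevs into lag_work->netdev_list at notification time, since the deferred work runs later and the bond membership may already have changed by then.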
+ /* This memory will be freed at the end of ice_lag_process_event */ + lag_work = kzalloc(sizeof(*lag_work), GFP_KERNEL); + if (!lag_work) + return -ENOMEM; + + lag_work->event_netdev = netdev; + lag_work->lag = lag; + lag_work->event = event; + if (event == NETDEV_CHANGEUPPER) { + struct netdev_notifier_changeupper_info *info; + + info = ptr; + upper_netdev = info->upper_dev; + } else { + upper_netdev = netdev_master_upper_dev_get(netdev); } + INIT_LIST_HEAD(&lag_work->netdev_list.node); + if (upper_netdev) { + struct net_device *tmp_nd; + struct ice_lag_netdev_list *nd_list; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) { + nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL); + if (!nd_list) + continue; + + nd_list->netdev = tmp_nd; + list_add(&nd_list->node, &lag_work->netdev_list.node); + } + rcu_read_unlock(); + } + + memcpy(&lag_work->info, ptr, sizeof(lag_work->info)); + INIT_WORK(&lag_work->lag_task, ice_lag_process_event); + queue_work(ice_lag_wq, &lag_work->lag_task); return NOTIFY_DONE; } /** * ice_register_lag_handler - register LAG handler on netdev - * @lag: LAG struct + * @lag: lag info struct */ static int ice_register_lag_handler(struct ice_lag *lag) { @@ -341,7 +1625,7 @@ static int ice_register_lag_handler(struct ice_lag *lag) /** * ice_unregister_lag_handler - unregister LAG handler on netdev - * @lag: LAG struct + * @lag: lag info struct */ static void ice_unregister_lag_handler(struct ice_lag *lag) { @@ -355,6 +1639,21 @@ static void ice_unregister_lag_handler(struct ice_lag *lag) } } +/** + * ice_lag_check_nvm_support - Check for NVM support for LAG + * @pf: PF struct + */ +static void ice_lag_check_nvm_support(struct ice_pf *pf) +{ + struct ice_hw_dev_caps *caps; + + caps = &pf->hw.dev_caps; + if (caps->common_cap.roce_lag) + ice_set_feature_support(pf, ICE_F_LAG); + else + ice_clear_feature_support(pf, ICE_F_LAG); +} + /** * ice_init_lag - initialize support for LAG * @pf: PF struct @@ -368,6 +1667,9 @@ int ice_init_lag(struct ice_pf *pf) struct ice_lag *lag; struct ice_vsi *vsi; int err; + u8 i; + + ice_lag_check_nvm_support(pf); pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL); if (!pf->lag) @@ -385,9 +1687,14 @@ int ice_init_lag(struct ice_pf *pf) lag->netdev = vsi->netdev; lag->role = ICE_LAG_NONE; lag->bonded = false; - lag->peer_netdev = NULL; + lag->bond_id = -1; + lag->bond_mode = -1; lag->upper_netdev = NULL; lag->notif_block.notifier_call = NULL; + lag->netdev_head = NULL; + ice_for_each_traffic_class(i) + memset(&pf->lag->rdma_qset[i], 0, + sizeof(struct iidc_rdma_qset_params)); err = ice_register_lag_handler(lag); if (err) { @@ -425,11 +1732,7 @@ void ice_deinit_lag(struct ice_pf *pf) if (lag->pf) ice_unregister_lag_handler(lag); - if (lag->upper_netdev) - dev_put(lag->upper_netdev); - - if (lag->peer_netdev) - dev_put(lag->peer_netdev); + flush_workqueue(ice_lag_wq); kfree(lag); diff --git a/drivers/thirdparty/ice/ice_lag.h b/drivers/thirdparty/ice/ice_lag.h index 1603c0f973c9..f5807b42abeb 100644 --- a/drivers/thirdparty/ice/ice_lag.h +++ b/drivers/thirdparty/ice/ice_lag.h @@ -6,6 +6,10 @@ #ifdef HAVE_NETDEV_UPPER_INFO #include +#include "ice.h" + +#define ICE_LAG_INVALID_PORT 0xFF +#define ICE_LAG_SINGLE_FILTER_SIZE 0xC /* LAG roles for netdev */ enum ice_lag_role { @@ -17,24 +21,101 @@ enum ice_lag_role { struct ice_pf; +struct ice_lag_netdev_list { + struct list_head node; + struct net_device *netdev; +}; + /* LAG info struct */ struct ice_lag { struct ice_pf *pf; /* backlink to PF struct */ + struct iidc_rdma_qset_params 
rdma_qset[IEEE_8021QAZ_MAX_TCS]; + struct ice_vsi *rdma_vsi; struct net_device *netdev; /* this PF's netdev */ - struct net_device *peer_netdev; struct net_device *upper_netdev; /* upper bonding netdev */ + struct list_head *netdev_head; struct notifier_block notif_block; + int bond_id; /* identify which bond we are in */ + s32 bond_mode; u8 bonded:1; /* currently bonded */ - u8 master:1; /* this is a master */ + u8 primary:1; /* this is primary */ u8 handler:1; /* did we register a rx_netdev_handler */ /* each thing blocking bonding will increment this value by one. * If this value is zero, then bonding is allowed. */ u16 dis_lag; u8 role; + struct ice_rule_query_data fltr; + u16 action_idx; +}; + +/* LAG workqueue struct */ +struct ice_lag_work { + struct work_struct lag_task; + struct ice_lag_netdev_list netdev_list; + struct ice_lag *lag; + unsigned long event; + struct net_device *event_netdev; + union { + struct netdev_notifier_changeupper_info changeupper_info; + struct netdev_notifier_bonding_info bonding_info; + } info; }; int ice_init_lag(struct ice_pf *pf); +int ice_lag_move_node_sync(struct ice_hw *old_hw, struct ice_hw *new_hw, + struct ice_vsi *new_vsi, + struct iidc_rdma_qset_params *qset); void ice_deinit_lag(struct ice_pf *pf); +struct ice_lag *ice_lag_find_primary(struct ice_lag *lag); +rx_handler_result_t ice_lag_nop_handler(struct sk_buff **pskb); + +/** + * ice_disable_lag - increment LAG disable count + * @lag: LAG struct + */ +static inline void ice_disable_lag(struct ice_lag *lag) +{ + /* If LAG this PF is not already disabled, disable it */ + rtnl_lock(); + if (!netdev_is_rx_handler_busy(lag->netdev)) { + if (!netdev_rx_handler_register(lag->netdev, + ice_lag_nop_handler, + NULL)) + lag->handler = true; + } + rtnl_unlock(); + lag->dis_lag++; +} + +/** + * ice_enable_lag - decrement disable count for a PF + * @lag: LAG struct + * + * Decrement the disable counter for a port, and if that count reaches + * zero, then remove the no-op Rx handler from that netdev + */ +static inline void ice_enable_lag(struct ice_lag *lag) +{ + if (lag->dis_lag) + lag->dis_lag--; + if (!lag->dis_lag && lag->handler) { + rtnl_lock(); + netdev_rx_handler_unregister(lag->netdev); + rtnl_unlock(); + lag->handler = false; + } +} + +/** + * ice_is_lag_dis - is LAG disabled + * @lag: LAG struct + * + * Return true if bonding is disabled + */ +static inline bool ice_is_lag_dis(struct ice_lag *lag) +{ + return !!(lag->dis_lag); +} #endif /* HAVE_NETDEV_UPPER_INFO */ #endif /* _ICE_LAG_H_ */ diff --git a/drivers/thirdparty/ice/ice_lan_tx_rx.h b/drivers/thirdparty/ice/ice_lan_tx_rx.h index 72adcf278824..d69b12a9a28a 100644 --- a/drivers/thirdparty/ice/ice_lan_tx_rx.h +++ b/drivers/thirdparty/ice/ice_lan_tx_rx.h @@ -218,7 +218,6 @@ struct ice_fltr_desc { (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S) #define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL - enum ice_rx_desc_status_bits { /* Note: These are predefined bit offsets */ ICE_RX_DESC_STATUS_DD_S = 0, @@ -249,7 +248,6 @@ enum ice_rx_desc_status_bits { #define ICE_RXD_QW1_STATUS_TSYNVALID_S ICE_RX_DESC_STATUS_TSYNVALID_S #define ICE_RXD_QW1_STATUS_TSYNVALID_M BIT_ULL(ICE_RXD_QW1_STATUS_TSYNVALID_S) - enum ice_rx_desc_fltstat_values { ICE_RX_DESC_FLTSTAT_NO_DATA = 0, ICE_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ @@ -257,7 +255,6 @@ enum ice_rx_desc_fltstat_values { ICE_RX_DESC_FLTSTAT_RSS_HASH = 3, }; - #define ICE_RXD_QW1_ERROR_S 19 #define ICE_RXD_QW1_ERROR_M (0xFFUL << ICE_RXD_QW1_ERROR_S) @@ -356,7 +353,6 @@ enum ice_rx_ptype_payload_layer { ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, }; - #define ICE_RXD_QW1_LEN_PBUF_S 38 #define ICE_RXD_QW1_LEN_PBUF_M (0x3FFFULL << ICE_RXD_QW1_LEN_PBUF_S) @@ -366,7 +362,6 @@ enum ice_rx_ptype_payload_layer { #define ICE_RXD_QW1_LEN_SPH_S 63 #define ICE_RXD_QW1_LEN_SPH_M BIT_ULL(ICE_RXD_QW1_LEN_SPH_S) - enum ice_rx_desc_ext_status_bits { /* Note: These are predefined bit offsets */ ICE_RX_DESC_EXT_STATUS_L2TAG2P_S = 0, @@ -377,7 +372,6 @@ enum ice_rx_desc_ext_status_bits { ICE_RX_DESC_EXT_STATUS_PELONGB_S = 11, }; - enum ice_rx_desc_pe_status_bits { /* Note: These are predefined bit offsets */ ICE_RX_DESC_PE_STATUS_QPID_S = 0, /* 18 BITS */ @@ -398,7 +392,6 @@ enum ice_rx_desc_pe_status_bits { #define ICE_RX_PROG_STATUS_DESC_QW1_PROGID_M \ (0x7UL << ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S) - #define ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S 19 #define ICE_RX_PROG_STATUS_DESC_QW1_ERROR_M \ (0x3FUL << ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S) @@ -642,7 +635,6 @@ struct ice_32b_rx_flex_desc_nic_2 { } flex_ts; }; - /* Receive Flex Descriptor profile IDs: There are a total * of 64 profiles where profile IDs 0/1 are for legacy; and * profiles 2-63 are flex profiles that can be programmed @@ -820,6 +812,14 @@ enum ice_rx_flex_desc_exstat_bits { ICE_RX_FLEX_DESC_EXSTAT_OVERSIZE_S = 3, }; +/* + * For ice_32b_rx_flex_desc.ts_low: + * [0]: Timestamp-low validity bit + * [1:7]: Timestamp-low value + */ +#define ICE_RX_FLEX_DESC_TS_L_VALID_S 0x01 +#define ICE_RX_FLEX_DESC_TS_L_VALID_M ICE_RX_FLEX_DESC_TS_L_VALID_S +#define ICE_RX_FLEX_DESC_TS_L_M 0xFE #define ICE_RXQ_CTX_SIZE_DWORDS 8 #define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) @@ -967,6 +967,11 @@ struct ice_tx_ctx_desc { __le64 qw1; }; +#define ICE_TX_GCS_DESC_START 0 /* 7 BITS */ +#define ICE_TX_GCS_DESC_OFFSET 7 /* 4 BITS */ +#define ICE_TX_GCS_DESC_TYPE 11 /* 2 BITS */ +#define ICE_TX_GCS_DESC_ENA 13 /* 1 BIT */ + #define ICE_TXD_CTX_QW1_DTYPE_S 0 #define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S) @@ -1036,7 +1041,6 @@ enum ice_tx_ctx_desc_eipt_offload { #define ICE_TXD_CTX_QW0_L4T_CS_S 23 #define ICE_TXD_CTX_QW0_L4T_CS_M BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S) - #define ICE_LAN_TXQ_MAX_QGRPS 127 #define ICE_LAN_TXQ_MAX_QDIS 1023 @@ -1090,7 +1094,6 @@ struct ice_tx_cmpltnq { u8 cmpl_type; } __packed; - /* LAN Tx Completion Queue Context */ struct ice_tx_cmpltnq_ctx { u64 base; @@ -1118,7 +1121,6 @@ struct ice_tx_drbell_fmt { u32 db; }; - /* LAN Tx Doorbell Queue Context */ struct ice_tx_drbell_q_ctx { u64 base; @@ -1396,17 +1398,4 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) return ice_ptype_lkup[ptype]; } -#define ICE_LINK_SPEED_UNKNOWN 0 -#define ICE_LINK_SPEED_10MBPS 10 -#define ICE_LINK_SPEED_100MBPS 100 -#define ICE_LINK_SPEED_1000MBPS 1000 -#define ICE_LINK_SPEED_2500MBPS 2500 -#define ICE_LINK_SPEED_5000MBPS 5000 -#define ICE_LINK_SPEED_10000MBPS 10000 -#define ICE_LINK_SPEED_20000MBPS 20000 -#define ICE_LINK_SPEED_25000MBPS 25000 -#define ICE_LINK_SPEED_40000MBPS 40000 -#define ICE_LINK_SPEED_50000MBPS 50000 -#define ICE_LINK_SPEED_100000MBPS 100000 - #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/thirdparty/ice/ice_lib.c b/drivers/thirdparty/ice/ice_lib.c index 820dcab6d774..5d99a4cbbfe9 100644 --- a/drivers/thirdparty/ice/ice_lib.c +++ 
b/drivers/thirdparty/ice/ice_lib.c @@ -8,6 +8,7 @@ #include "ice_dcb_lib.h" #include "ice_devlink.h" #include "ice_vsi_vlan_ops.h" +#include "ice_irq.h" /** * ice_vsi_type_str - maps VSI type enum to string equivalents @@ -37,6 +38,27 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) } } +/** + * ice_vsi_requires_vf - Does this VSI type always require a VF? + * @vsi_type: the VSI type + * + * Returns true if the VSI type *must* have a VF pointer. Returns false + * otherwise. In particular, VSI types which may *optionally* have a VF + * pointer return false. + * + * Used to WARN in cases where we always expect a VF pointer to be non-NULL. + */ +static int ice_vsi_requires_vf(enum ice_vsi_type vsi_type) +{ + switch (vsi_type) { + case ICE_VSI_ADI: + case ICE_VSI_VF: + return true; + default: + return false; + } +} + /** * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings * @vsi: the VSI being configured @@ -75,6 +97,8 @@ static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena) */ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) { + int alloc_txq = vsi->alloc_txq; + int alloc_rxq = vsi->alloc_rxq; struct ice_pf *pf = vsi->back; struct device *dev; @@ -93,20 +117,25 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->rx_rings) goto err_rings; + if (vsi->type == ICE_VSI_PF && !ice_is_safe_mode(pf)) { + alloc_txq = pf->max_adq_qps; + alloc_rxq = pf->max_adq_qps; + } + #ifdef HAVE_XDP_SUPPORT /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */ - vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq), + vsi->txq_map = devm_kcalloc(dev, (2 * alloc_txq), sizeof(*vsi->txq_map), GFP_KERNEL); #else - vsi->txq_map = devm_kcalloc(dev, vsi->alloc_txq, - sizeof(*vsi->txq_map), GFP_KERNEL); + vsi->txq_map = devm_kcalloc(dev, alloc_txq, sizeof(*vsi->txq_map), + GFP_KERNEL); #endif /* HAVE_XDP_SUPPORT */ if (!vsi->txq_map) goto err_txq_map; - vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq, - sizeof(*vsi->rxq_map), GFP_KERNEL); + vsi->rxq_map = devm_kcalloc(dev, alloc_rxq, sizeof(*vsi->rxq_map), + GFP_KERNEL); if (!vsi->rxq_map) goto err_rxq_map; @@ -120,8 +149,22 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->q_vectors) goto err_vectors; +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_AF_XDP_ZC_SUPPORT + vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL); + if (!vsi->af_xdp_zc_qps) + goto err_zc_qps; +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif /* HAVE_XDP_SUPPORT */ + return 0; +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_AF_XDP_ZC_SUPPORT +err_zc_qps: + devm_kfree(dev, vsi->q_vectors); +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif /* HAVE_XDP_SUPPORT */ err_vectors: devm_kfree(dev, vsi->rxq_map); err_rxq_map: @@ -142,6 +185,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) switch (vsi->type) { case ICE_VSI_PF: case ICE_VSI_OFFLOAD_MACVLAN: + case ICE_VSI_ADI: case ICE_VSI_VMDQ2: case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_CTRL: @@ -165,25 +209,24 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) /** * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI * @vsi: the VSI being configured - * @vf_id: ID of the VF being configured + * @vf: the VF associated with this VSI, if any * * Return 0 on success and a negative value on error */ -static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) +static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf) { + enum ice_vsi_type vsi_type = vsi->type; struct ice_pf *pf = vsi->back; - struct ice_vf *vf = 
NULL; - if (vsi->type == ICE_VSI_VF) - vsi->vf_id = vf_id; - else - vsi->vf_id = ICE_INVAL_VFID; - switch (vsi->type) { + if (WARN_ON(!vf && ice_vsi_requires_vf(vsi_type))) + return; + + switch (vsi_type) { case ICE_VSI_PF: /* default to 1 Tx queue per MSI-X to not hurt our performance */ vsi->alloc_txq = min3(pf->num_lan_msix, ice_get_avail_txq_count(pf), - (u16)num_online_cpus()); + pf->max_qps); if (vsi->req_txq) { vsi->alloc_txq = vsi->req_txq; vsi->num_txq = vsi->req_txq; @@ -198,7 +241,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) /* default to 1 Rx queue per MSI-X to not hurt our performance */ vsi->alloc_rxq = min3(pf->num_lan_msix, ice_get_avail_rxq_count(pf), - (u16)num_online_cpus()); + pf->max_qps); if (vsi->req_rxq) { vsi->alloc_rxq = vsi->req_rxq; vsi->num_rxq = vsi->req_rxq; @@ -216,17 +259,25 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) vsi->alloc_rxq = ICE_DFLT_RXQ_VMDQ_VSI; vsi->num_q_vectors = ICE_DFLT_VEC_VMDQ_VSI; break; + case ICE_VSI_ADI: + vsi->alloc_txq = pf->vfs.num_qps_per; + vsi->alloc_rxq = pf->vfs.num_qps_per; + + /* For SIOV VFs, we reserve vectors required for Qs and 1 extra + * for OICR. + */ + vsi->num_q_vectors = pf->vfs.num_msix_per; + break; case ICE_VSI_SWITCHDEV_CTRL: - /* The number of queues for ctrl vsi is equal to number of VFs. + /* The number of queues for ctrl VSI is equal to number of VFs. * Each ring is associated to the corresponding VF_PR netdev. */ - vsi->alloc_txq = pf->num_alloc_vfs; - vsi->alloc_rxq = pf->num_alloc_vfs; + vsi->alloc_txq = ice_get_num_vfs(pf); + vsi->alloc_rxq = vsi->alloc_txq; vsi->num_q_vectors = 1; break; case ICE_VSI_VF: - vf = &pf->vf[vsi->vf_id]; - /* pf->num_msix_per_vf includes (VF miscellaneous vector + + /* pf->vfs.num_msix_per includes (VF miscellaneous vector + * data queue interrupts). 
Since vsi->num_q_vectors is number * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the * original vector count @@ -243,7 +294,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) vsi->alloc_txq = vf->num_vf_qs; vsi->alloc_rxq = vf->num_vf_qs; - vsi->num_q_vectors = pf->num_msix_per_vf - + vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF; } break; @@ -261,7 +312,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) vsi->alloc_rxq = 1; break; default: - dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type); + dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type); break; } @@ -305,22 +356,22 @@ void ice_vsi_delete(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_vsi_ctx *ctxt; - enum ice_status status; + int status; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) return; if (vsi->type == ICE_VSI_VF) - ctxt->vf_num = vsi->vf_id; + ctxt->vf_num = vsi->vf->vf_id; ctxt->vsi_num = vsi->vsi_num; memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); if (status) - dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n", + vsi->vsi_num, status); kfree(ctxt); } @@ -336,6 +387,14 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) dev = ice_pf_to_dev(pf); +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_AF_XDP_ZC_SUPPORT + if (vsi->af_xdp_zc_qps) { + bitmap_free(vsi->af_xdp_zc_qps); + vsi->af_xdp_zc_qps = NULL; + } +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif /* HAVE_XDP_SUPPORT */ /* free the ring and vector containers */ if (vsi->q_vectors) { devm_kfree(dev, vsi->q_vectors); @@ -376,7 +435,7 @@ static void ice_vsi_free_rss_global_lut_memory(struct ice_vsi *vsi) */ static void ice_vsi_free_rss_global_lut(struct ice_vsi *vsi) { - enum ice_status status; + int status; if (!vsi->global_lut_id) return; @@ -401,8 +460,8 @@ static void ice_vsi_free_rss_global_lut(struct ice_vsi *vsi) */ static void ice_vsi_alloc_rss_global_lut(struct ice_vsi *vsi) { - enum ice_status status; struct device *dev; + int status; if (vsi->type != ICE_VSI_VF) return; @@ -459,8 +518,7 @@ int ice_vsi_clear(struct ice_vsi *vsi) pf->vsi[vsi->idx] = NULL; if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL) pf->next_vsi = vsi->idx; - if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && - vsi->vf_id != ICE_INVAL_VFID) + if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf) pf->next_vsi = vsi->idx; ice_vsi_free_arrays(vsi); @@ -517,13 +575,16 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d { struct ice_q_vector *q_vector = (struct ice_q_vector *)data; struct ice_pf *pf = q_vector->vsi->back; - int i; + struct ice_vf *vf; + unsigned int bkt; if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; - ice_for_each_vf(pf, i) - napi_schedule(&pf->vf[i].repr->q_vector->napi); + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) + napi_schedule(&vf->repr->q_vector->napi); + rcu_read_unlock(); return IRQ_HANDLED; } @@ -534,17 +595,24 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d * @vsi_type: type of VSI * @ch: ptr to channel * @tc: traffic class number for VF ADQ - * @vf_id: ID of the VF being configured + * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL + * + * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. 
For ICE_VSI_CTRL, + * it may be NULL in the case there is no association with a VF. For + * ICE_VSI_VF the VF pointer *must not* be NULL. * * returns a pointer to a VSI on success, NULL on failure. */ static struct ice_vsi * ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, - struct ice_channel *ch, u16 vf_id, u8 tc) + struct ice_channel *ch, struct ice_vf *vf, u8 tc) { struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi = NULL; + if (WARN_ON(!vf && ice_vsi_requires_vf(vsi_type))) + return NULL; + /* Need to protect the allocation of the VSIs at the PF level */ mutex_lock(&pf->sw_mutex); @@ -563,17 +631,19 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, vsi->type = vsi_type; vsi->back = pf; + /* For VSIs which don't have a connected VF, this will be NULL */ + vsi->vf = vf; if (vsi_type == ICE_VSI_VF) vsi->vf_adq_tc = tc; set_bit(ICE_VSI_DOWN, vsi->state); - if (vsi_type == ICE_VSI_VF) - ice_vsi_set_num_qs(vsi, vf_id); - else if (vsi_type != ICE_VSI_CHNL) - ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); + /* Don't set queues for ICE_VSI_CHNL */ + if (vsi_type != ICE_VSI_CHNL) + ice_vsi_set_num_qs(vsi, vf); switch (vsi->type) { case ICE_VSI_OFFLOAD_MACVLAN: + case ICE_VSI_ADI: case ICE_VSI_VMDQ2: case ICE_VSI_PF: if (ice_vsi_alloc_arrays(vsi)) @@ -616,7 +686,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, goto unlock_pf; } - if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) { + if (vsi->type == ICE_VSI_CTRL && !vf) { /* Use the last VSI slot as the index for PF control VSI */ vsi->idx = pf->num_alloc_vsi - 1; pf->ctrl_vsi_idx = vsi->idx; @@ -631,8 +701,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, pf->next_vsi); } - if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID) - pf->vf[vf_id].ctrl_vsi_idx = vsi->idx; + if (vsi->type == ICE_VSI_CTRL && vf) + vf->ctrl_vsi_idx = vsi->idx; goto unlock_pf; err_rings: @@ -679,19 +749,18 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi) return -EPERM; /* PF main VSI gets only 64 FD resources from guaranteed pool - * when ADQ is configured. This is current policy, change as needed + * when ADQ is configured. */ #define ICE_PF_VSI_GFLTR 64 - /* determines FD filter resources per VSI from shared(best effort) and + /* determine FD filter resources per VSI from shared(best effort) and * dedicated pool */ if (vsi->type == ICE_VSI_PF) { vsi->num_gfltr = g_val; #ifdef NETIF_F_HW_TC - /* if MQPRIO ic configured, main VSI doesn't get all - * FD resources from guaranteed pool. Current policy is, - * PF VSI gets 64 FD resources + /* if MQPRIO is configured, main VSI doesn't get all FD + * resources from guaranteed pool. 
PF VSI gets 64 FD resources */ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { if (g_val < ICE_PF_VSI_GFLTR) @@ -770,6 +839,11 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi) }; int ret; + if (vsi->type == ICE_VSI_PF && !ice_is_safe_mode(pf)) { + rx_qs_cfg.q_count = pf->max_adq_qps; + tx_qs_cfg.q_count = pf->max_adq_qps; + } + if (vsi->type == ICE_VSI_CHNL) return 0; @@ -793,16 +867,24 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi) void ice_vsi_put_qs(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - int i; + u16 alloc_txq, alloc_rxq; + u16 i; + + alloc_txq = vsi->alloc_txq; + alloc_rxq = vsi->alloc_rxq; + if (vsi->type == ICE_VSI_PF && !ice_is_safe_mode(pf)) { + alloc_txq = pf->max_adq_qps; + alloc_rxq = pf->max_adq_qps; + } mutex_lock(&pf->avail_q_mutex); - for (i = 0; i < vsi->alloc_txq; i++) { + for (i = 0; i < alloc_txq; i++) { clear_bit(vsi->txq_map[i], pf->avail_txqs); vsi->txq_map[i] = ICE_INVAL_Q_INDEX; } - for (i = 0; i < vsi->alloc_rxq; i++) { + for (i = 0; i < alloc_rxq; i++) { clear_bit(vsi->rxq_map[i], pf->avail_rxqs); vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; } @@ -822,14 +904,14 @@ bool ice_is_safe_mode(struct ice_pf *pf) } /** - * ice_is_peer_ena + * ice_is_aux_ena * @pf: pointer to the PF struct * * returns true if peer devices/drivers are supported, false otherwise */ -bool ice_is_peer_ena(struct ice_pf *pf) +bool ice_is_aux_ena(struct ice_pf *pf) { - return test_bit(ICE_FLAG_PEER_ENA, pf->flags); + return test_bit(ICE_FLAG_AUX_ENA, pf->flags); } /** @@ -842,15 +924,15 @@ bool ice_is_peer_ena(struct ice_pf *pf) static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - enum ice_status status; + int status; if (ice_is_safe_mode(pf)) return; status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); if (status) - dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n", + vsi->vsi_num, status); } /** @@ -902,11 +984,12 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) vsi->rss_size = min_t(u16, vsi->num_rxq, BIT(cap->rss_table_entry_width)); else - vsi->rss_size = min_t(u16, num_online_cpus(), + vsi->rss_size = min_t(u16, pf->max_qps, BIT(cap->rss_table_entry_width)); vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; break; case ICE_VSI_OFFLOAD_MACVLAN: + case ICE_VSI_ADI: case ICE_VSI_VMDQ2: case ICE_VSI_SWITCHDEV_CTRL: vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; @@ -974,6 +1057,9 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 << ICE_AQ_VSI_OUTER_TAG_TYPE_S) & ICE_AQ_VSI_OUTER_TAG_TYPE_M; + ctxt->info.outer_vlan_flags |= + ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING << + ICE_AQ_VSI_OUTER_VLAN_EMODE_S; } /* Have 1:1 UP mapping for both ingress/egress tables */ table |= ICE_UP_TABLE_TRANSLATE(0, 0); @@ -996,7 +1082,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) * @vsi: the VSI being configured * @ctxt: VSI context structure */ -static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) +static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) { u16 offset = 0, qmap = 0, tx_count = 0, pow = 0; u16 num_txq_per_tc, num_rxq_per_tc; @@ -1011,12 +1097,21 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) vsi->tc_cfg.ena_tc = 1; } - num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC); - if 
(!num_rxq_per_tc) - num_rxq_per_tc = 1; - num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc; - if (!num_txq_per_tc) - num_txq_per_tc = 1; + /* For VF VSI, all queues should be mapped to TC0 and other TCs are + * assigned to queue 0 by default + */ + if (vsi->type == ICE_VSI_VF) { + num_rxq_per_tc = qcount_rx; + num_txq_per_tc = qcount_tx; + } else { + num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, + ICE_MAX_RXQS_PER_TC); + if (!num_rxq_per_tc) + num_rxq_per_tc = 1; + num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc; + if (!num_txq_per_tc) + num_txq_per_tc = 1; + } /* find the (rounded up) power-of-2 of qcount */ pow = (u16)order_base_2(num_rxq_per_tc); @@ -1033,8 +1128,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) * Setup number and offset of Rx queues for all TCs for the VSI */ ice_for_each_traffic_class(i) { - if (!(vsi->tc_cfg.ena_tc & BIT(i))) { - /* TC is not enabled */ + if (!(vsi->tc_cfg.ena_tc & BIT(i)) || + (i != 0 && vsi->type == ICE_VSI_VF)) { vsi->tc_cfg.tc_info[i].qoffset = 0; vsi->tc_cfg.tc_info[i].qcount_rx = 1; vsi->tc_cfg.tc_info[i].qcount_tx = 1; @@ -1069,7 +1164,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) else vsi->num_rxq = num_rxq_per_tc; + if (vsi->num_rxq > vsi->alloc_rxq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", + vsi->num_rxq, vsi->alloc_rxq); + return -EINVAL; + } + vsi->num_txq = tx_count; + if (vsi->num_txq > vsi->alloc_txq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", + vsi->num_txq, vsi->alloc_txq); + return -EINVAL; + } if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. 
Hence making them equal\n"); @@ -1087,6 +1193,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) */ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); + + return 0; } /** @@ -1228,6 +1336,8 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) if (!ctxt) return -ENOMEM; + ice_set_dflt_vsi_ctx(hw, ctxt); + switch (vsi->type) { case ICE_VSI_CTRL: case ICE_VSI_LB: @@ -1238,12 +1348,13 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) case ICE_VSI_OFFLOAD_MACVLAN: case ICE_VSI_VMDQ2: case ICE_VSI_SWITCHDEV_CTRL: + case ICE_VSI_ADI: ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; break; case ICE_VSI_VF: ctxt->flags = ICE_AQ_VSI_TYPE_VF; /* VF number here is the absolute VF number (0-255) */ - ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id; + ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id; break; default: ret = -ENODEV; @@ -1265,9 +1376,9 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; } - ice_set_dflt_vsi_ctx(hw, ctxt); if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) ice_set_fd_vsi_ctx(ctxt, vsi); + /* if the switch is in VEB mode, allow VSI loopback */ if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) ctxt->info.sw_flags |= (ICE_AQ_VSI_SW_FLAG_ALLOW_LB | @@ -1290,7 +1401,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) if (vsi->type == ICE_VSI_CHNL) { ice_chnl_vsi_setup_q_map(vsi, ctxt); } else { - ice_vsi_setup_q_map(vsi, ctxt); + ret = ice_vsi_setup_q_map(vsi, ctxt); + if (ret) + goto out; + if (!init_vsi) /* means VSI being updated */ /* must to indicate which section of VSI context are * being modified @@ -1445,9 +1559,40 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) return ice_search_res(res, needed, id); } +/** + * ice_get_vf_ctrl_res - Get VF control VSI resource + * @pf: pointer to the PF structure + * @vsi: the VSI to allocate a resource for + * + * Look up whether another VF has already allocated the control VSI resource. + * If so, re-use this resource so that we share it among all VFs. + * + * Otherwise, allocate the resource and return it. + */ +static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi) +{ + struct ice_vf *vf; + unsigned int bkt; + int base; + + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { + if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { + base = pf->vsi[vf->ctrl_vsi_idx]->base_vector; + rcu_read_unlock(); + return base; + } + } + rcu_read_unlock(); + + return ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors, + ICE_RES_VF_CTRL_VEC_ID); +} + /** * ice_vsi_setup_vector_base - Set up the base vector for the given VSI * @vsi: ptr to the VSI + * @tc: traffic class. 
Used for VF ADQ * * This should only be called after ice_vsi_alloc() which allocates the * corresponding SW VSI structure and initializes num_queue_pairs for the @@ -1455,7 +1600,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) * * Returns 0 on success or negative on failure */ -static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) +static int ice_vsi_setup_vector_base(struct ice_vsi *vsi, u8 __maybe_unused tc) { struct ice_pf *pf = vsi->back; struct device *dev; @@ -1464,8 +1609,17 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) dev = ice_pf_to_dev(pf); /* SRIOV doesn't grab irq_tracker entries for each VSI */ - if (vsi->type == ICE_VSI_VF) + if (vsi->type == ICE_VSI_VF) { + /* Assign the base_vector to allow determining the vector + * index. Note that VF VSIs MSI-X index is relative to the VF + * space and does not get tracked individually in the software + * IRQ tracker. + */ + vsi->base_vector = ICE_NONQ_VECS_VF; + /* Adjust base_vector for VSIs corresponding to non-zero TC */ + vsi->base_vector += vsi->vf->ch[tc].offset; return 0; + } if (vsi->type == ICE_VSI_CHNL) return 0; @@ -1475,22 +1629,14 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) return -EEXIST; } - num_q_vectors = vsi->num_q_vectors; + if (vsi->type == ICE_VSI_PF) + num_q_vectors = pf->max_adq_qps; + else + num_q_vectors = vsi->num_q_vectors; + /* reserve slots from OS requested IRQs */ - if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) { - int i; - - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; - - if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) { - base = pf->vsi[vf->ctrl_vsi_idx]->base_vector; - break; - } - } - if (i == pf->num_alloc_vfs) - base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, - ICE_RES_VF_CTRL_VEC_ID); + if (vsi->type == ICE_VSI_CTRL && vsi->vf) { + base = ice_get_vf_ctrl_res(pf, vsi); } else { base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx); @@ -1571,8 +1717,10 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->q_index = i; ring->reg_idx = vsi->txq_map[i]; ring->vsi = vsi; + ring->tx_tstamps = &pf->ptp.port.tx; ring->dev = dev; ring->count = vsi->num_tx_desc; + ring->txq_teid = ICE_INVAL_TEID; if (dvm_ena) ring->flags |= ICE_TX_FLAGS_VLAN_TAG_LOC_L2TAG2; else @@ -1595,6 +1743,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->netdev = vsi->netdev; ring->dev = dev; ring->count = vsi->num_rx_desc; + ring->cached_phctime = pf->ptp.cached_phc_time; WRITE_ONCE(vsi->rx_rings[i], ring); } @@ -1672,6 +1821,22 @@ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) kfree(lut); } +/** + * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI + * @vsi: VSI to be configured + * @disable: set to true to have FCS / CRC in the frame data + */ +void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable) +{ + int i; + + ice_for_each_rxq(vsi, i) + if (disable) + vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS; + else + vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS; +} + /** * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI * @vsi: VSI to be configured @@ -1687,7 +1852,7 @@ int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) #ifdef NETIF_F_HW_TC if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size && (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) { - vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size); + vsi->rss_size = vsi->ch_rss_size; } else { vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq); @@ -1717,7 +1882,7 @@ int 
ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); if (err) { - dev_err(dev, "set_rss_lut failed, error %d\n", err); + ice_dev_err_errno(dev, err, "set_rss_lut failed"); goto ice_vsi_cfg_rss_exit; } @@ -1734,7 +1899,7 @@ int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) err = ice_set_rss_key(vsi, key); if (err) - dev_err(dev, "set_rss_key failed, error %d\n", err); + ice_dev_err_errno(dev, err, "set_rss_key failed"); kfree(key); ice_vsi_cfg_rss_exit: @@ -1742,6 +1907,60 @@ ice_vsi_cfg_rss_exit: return err; } +/** + * ice_get_valid_rss_size - return valid number of RSS queues + * @hw: pointer to the HW structure + * @new_size: requested RSS queues + */ +int ice_get_valid_rss_size(struct ice_hw *hw, int new_size) +{ + struct ice_hw_common_caps *caps = &hw->func_caps.common_cap; + + return min_t(int, new_size, BIT(caps->rss_table_entry_width)); +} + +/** + * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size + * @vsi: VSI to reconfigure RSS LUT on + * @req_rss_size: requested range of queue numbers for hashing + * + * Set the VSI's RSS parameters, configure the RSS LUT based on these. + */ +int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size) +{ + struct ice_pf *pf = vsi->back; + struct device *dev; + struct ice_hw *hw; + int err; + u8 *lut; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + + if (!req_rss_size) + return -EINVAL; + + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + /* set RSS LUT parameters */ + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + vsi->rss_size = 1; + else + vsi->rss_size = ice_get_valid_rss_size(hw, req_rss_size); + + /* create/set RSS LUT */ + ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); + err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); + if (err) + ice_dev_err_errno(dev, err, "Cannot set RSS lut, aq_err %s", + ice_aq_str(hw->adminq.sq_last_status)); + + kfree(lut); + return err; +} + /** * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows * @vsi: VSI to be configured @@ -1753,8 +1972,8 @@ ice_vsi_cfg_rss_exit: static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) { @@ -1765,11 +1984,10 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA); if (status) - dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n", + vsi->vsi_num, status); } - static const struct ice_rss_hash_cfg default_rss_cfgs[] = { /* configure RSS for IPv4 with input set IP src/dst */ {ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false}, @@ -1793,6 +2011,9 @@ static const struct ice_rss_hash_cfg default_rss_cfgs[] = { /* configure RSS for sctp6 with input set IPv6 src/dst */ {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6, ICE_HASH_SCTP_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */ + { ICE_FLOW_SEG_HDR_ESP, + ICE_FLOW_HASH_ESP_SPI, ICE_RSS_ANY_HEADERS, false }, }; /** @@ -1811,7 +2032,6 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct device *dev; u32 i; @@ -1824,6 
+2044,7 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) { const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i]; + int status; status = ice_add_rss_cfg(hw, vsi_handle, cfg); if (status) @@ -1909,6 +2130,17 @@ void ice_update_eth_stats(struct ice_vsi *vsi) */ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) { +#ifdef CONFIG_ICE_USE_SKB + u16 max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; + + if (!vsi->netdev) + max_frame = ICE_RXBUF_2048; + else if (vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD > max_frame) + max_frame = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; + + vsi->max_frame = max_frame; + vsi->rx_buf_len = max_frame; +#else /* CONFIG_ICE_USE_SKB */ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; vsi->rx_buf_len = ICE_RXBUF_2048; @@ -1926,6 +2158,7 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) vsi->rx_buf_len = ICE_RXBUF_2048; #endif } +#endif /* CONFIG_ICE_USE_SKB */ } /** @@ -2017,13 +2250,14 @@ setup_rings: /** * ice_vsi_cfg_txqs - Configure the VSI for Tx * @vsi: the VSI being configured - * @rings: Tx ring array to be configured + * @tx_rings: Tx ring array to be configured + * @count: number of Tx ring array elements * * Return 0 on success and a negative value on error * Configure the Tx VSI for operation. */ static int -ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings) +ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 count) { struct ice_aqc_add_tx_qgrp *qg_buf; u16 q_idx = 0; @@ -2035,8 +2269,8 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings) qg_buf->num_txqs = 1; - for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) { - err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf); + for (q_idx = 0; q_idx < count; q_idx++) { + err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf); if (err) goto err_cfg_txqs; } @@ -2055,7 +2289,7 @@ err_cfg_txqs: */ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi) { - return ice_vsi_cfg_txqs(vsi, vsi->tx_rings); + return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq); } #ifdef HAVE_XDP_SUPPORT @@ -2073,13 +2307,13 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi) int i; #endif /* HAVE_AF_XDP_ZC_SUPPORT */ - ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings); + ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq); #ifdef HAVE_AF_XDP_ZC_SUPPORT if (ret) return ret; for (i = 0; i < vsi->num_xdp_txq; i++) - vsi->xdp_rings[i]->xsk_pool = ice_xsk_umem(vsi->xdp_rings[i]); + vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]); #endif /* HAVE_AF_XDP_ZC_SUPPORT */ return ret; @@ -2173,6 +2407,72 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector) } } +/** + * ice_vsi_get_q_vector_q_base - get vector's base numbers of Tx and Rx queues + * @vsi: related VSI + * @vector_id: index of the vector in VSI + * @txq: pointer to a return value of Tx base queue number + * @rxq: pointer to a return value of Rx base queue number + */ +void ice_vsi_get_q_vector_q_base(struct ice_vsi *vsi, u16 vector_id, u16 *txq, + u16 *rxq) +{ + int i; + + *txq = 0; + *rxq = 0; + + for (i = 0; i < vector_id; i++) { + struct ice_q_vector *q_vector = vsi->q_vectors[i]; + + *txq += q_vector->num_ring_tx; + *rxq += q_vector->num_ring_rx; + } +} + +/** + * ice_vsi_cfg_single_msix - MSIX interrupt config for a single vector in the HW + * @vsi: the VSI being configured + * @vector_id: index of the vector in VSI + * + * This configures an MSIX mode interrupt for the PF VSI, and should not be + * used for the 
VF VSI. + */ +static void ice_vsi_cfg_single_msix(struct ice_vsi *vsi, u16 vector_id) +{ + struct ice_q_vector *q_vector = vsi->q_vectors[vector_id]; + u16 reg_idx = q_vector->reg_idx; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + u16 txq, rxq; + int q; + + ice_vsi_get_q_vector_q_base(vsi, vector_id, &txq, &rxq); + + ice_cfg_itr(hw, q_vector); + + /* Both Transmit Queue Interrupt Cause Control register + * and Receive Queue Interrupt Cause control register + * expects MSIX_INDX field to be the vector index + * within the function space and not the absolute + * vector index across PF or across device. + * For SR-IOV VF VSIs queue vector index always starts + * with 1 since first vector index(0) is used for OICR + * in VF space. Since VMDq and other PF VSIs are within + * the PF function space, use the vector index that is + * tracked for this PF. + */ + for (q = 0; q < q_vector->num_ring_tx; q++) { + ice_cfg_txq_interrupt(vsi, txq, reg_idx, q_vector->tx.itr_idx); + txq++; + } + + for (q = 0; q < q_vector->num_ring_rx; q++) { + ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, q_vector->rx.itr_idx); + rxq++; + } +} + /** * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW * @vsi: the VSI being configured @@ -2182,40 +2482,10 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector) */ void ice_vsi_cfg_msix(struct ice_vsi *vsi) { - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - u16 txq = 0, rxq = 0; - int i, q; + int i; - for (i = 0; i < vsi->num_q_vectors; i++) { - struct ice_q_vector *q_vector = vsi->q_vectors[i]; - u16 reg_idx = q_vector->reg_idx; - - ice_cfg_itr(hw, q_vector); - - /* Both Transmit Queue Interrupt Cause Control register - * and Receive Queue Interrupt Cause control register - * expects MSIX_INDX field to be the vector index - * within the function space and not the absolute - * vector index across PF or across device. - * For SR-IOV VF VSIs queue vector index always starts - * with 1 since first vector index(0) is used for OICR - * in VF space. Since VMDq and other PF VSIs are within - * the PF function space, use the vector index that is - * tracked for this PF. 
- */ - for (q = 0; q < q_vector->num_ring_tx; q++) { - ice_cfg_txq_interrupt(vsi, txq, reg_idx, - q_vector->tx.itr_idx); - txq++; - } - - for (q = 0; q < q_vector->num_ring_rx; q++) { - ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, - q_vector->rx.itr_idx); - rxq++; - } - } + ice_for_each_q_vector(vsi, i) + ice_vsi_cfg_single_msix(vsi, i); } /** @@ -2246,17 +2516,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi) * @rst_src: reset source * @rel_vmvf_num: Relative ID of VF/VM * @rings: Tx ring array to be stopped + * @count: number of Tx ring array elements */ static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, - u16 rel_vmvf_num, struct ice_ring **rings) + u16 rel_vmvf_num, struct ice_ring **rings, u16 count) { u16 q_idx; if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) return -EINVAL; - for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) { + for (q_idx = 0; q_idx < count; q_idx++) { struct ice_txq_meta txq_meta = { }; int status; @@ -2284,7 +2555,7 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num) { - return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings); + return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); } #ifdef HAVE_XDP_SUPPORT @@ -2294,7 +2565,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, */ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) { - return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings); + return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); } #endif /* HAVE_XDP_SUPPORT */ @@ -2313,8 +2584,6 @@ bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA); } - - static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) { if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { @@ -2350,7 +2619,7 @@ static int ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi, } if (vsi->type == ICE_VSI_VF) { - struct ice_vf *vf = &vsi->back->vf[vsi->vf_id]; + struct ice_vf *vf = vsi->vf; q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector, tc); @@ -2381,11 +2650,11 @@ clear_reg_idx: */ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) { - enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, - enum ice_sw_fwd_act_type act); + int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, + enum ice_sw_fwd_act_type act); struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; dev = ice_pf_to_dev(pf); eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth; @@ -2404,9 +2673,9 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) } if (status) - dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n", + dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n", create ? "adding" : "removing", tx ? "TX" : "RX", - vsi->vsi_num, ice_stat_str(status)); + vsi->vsi_num, status); } /** @@ -2426,7 +2695,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) struct ice_port_info *port_info; struct ice_pf *pf = vsi->back; u32 agg_node_id_start = 0; - enum ice_status status; + int status; /* create (as needed) scheduler aggregator node and move VSI into * corresponding aggregator node @@ -2454,13 +2723,13 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) * per aggregator node can be only 64. 
Following code handles * aggregator(s) for VF VSIs, either selects a agg_node which * was already created provided num_vsis < 64, otherwise - * select next available node, which woll be created + * select next available node, which will be created */ max_agg_nodes = ICE_MAX_VF_AGG_NODES; agg_node_id_start = ICE_VF_AGG_NODE_ID_START; agg_node_iter = &pf->vf_agg_node[0]; break; -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS case ICE_VSI_OFFLOAD_MACVLAN: /* there can be 'n' offloaded NACVLAN, hence select the desired * aggregator node for offloaded MACVLAN VSI @@ -2548,9 +2817,8 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) * @pf: board private structure * @pi: pointer to the port_info instance * @vsi_type: VSI type - * @vf_id: defines VF ID to which this VSI connects. This field is meant to be - * used only for ICE_VSI_VF VSI type. For other VSI types, should - * fill-in ICE_INVAL_VFID as input. + * @vf: pointer to the VF associated with this VSI, if any. VSIs not + * connected to a VF should pass NULL. * @ch: ptr to channel * @tc: traffic class for VF ADQ * @@ -2561,22 +2829,18 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) */ struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, - enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch, - u8 tc) + enum ice_vsi_type vsi_type, struct ice_vf *vf, + struct ice_channel *ch, u8 tc) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = ice_pf_to_dev(pf); - enum ice_status status; struct ice_vsi *vsi; int ret, i; if (vsi_type == ICE_VSI_CHNL) - vsi = ice_vsi_alloc(pf, vsi_type, ch, ICE_INVAL_VFID, 0); - else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL) { - vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf_id, tc); - } else { - vsi = ice_vsi_alloc(pf, vsi_type, NULL, ICE_INVAL_VFID, tc); - } + vsi = ice_vsi_alloc(pf, vsi_type, ch, vf, 0); + else + vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf, tc); if (!vsi) { dev_err(dev, "could not allocate VSI\n"); @@ -2588,9 +2852,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (vsi->type == ICE_VSI_PF) vsi->ethtype = ETH_P_PAUSE; - if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL) - vsi->vf_id = vf_id; - ice_alloc_fd_res(vsi); if (vsi_type != ICE_VSI_CHNL) { @@ -2619,6 +2880,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, switch (vsi->type) { case ICE_VSI_CTRL: case ICE_VSI_OFFLOAD_MACVLAN: + case ICE_VSI_ADI: case ICE_VSI_VMDQ2: case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: @@ -2626,7 +2888,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (ret) goto unroll_vsi_init; - ret = ice_vsi_setup_vector_base(vsi); + ret = ice_vsi_setup_vector_base(vsi, 0); if (ret) goto unroll_alloc_q_vector; @@ -2641,10 +2903,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ice_vsi_map_rings_to_vectors(vsi); ice_vsi_reset_stats(vsi); - /* Perform an initial read of the statistics registers now to - * set the baseline before the VSI becomes operational. 
- */ - ice_update_eth_stats(vsi); /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ if (vsi->type != ICE_VSI_CTRL) @@ -2678,6 +2936,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (ret) goto unroll_alloc_q_vector; + ret = ice_vsi_setup_vector_base(vsi, tc); + if (ret) + goto unroll_alloc_q_vector; + ret = ice_vsi_set_q_vectors_reg_idx(vsi, tc); if (ret) goto unroll_vector_base; @@ -2717,11 +2979,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, } dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, - max_txqs); - if (status) { - dev_err(dev, "VSI %d failed lan queue config, error %s\n", - vsi->vsi_num, ice_stat_str(status)); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (ret) { + dev_err(dev, "VSI %d failed LAN queue config, error %d\n", + vsi->vsi_num, ret); goto unroll_clear_rings; } @@ -2741,7 +3003,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ice_cfg_sw_lldp(vsi, true, true); } - if (!vsi->agg_node) ice_set_agg_vsi(vsi); @@ -2765,43 +3026,54 @@ unroll_vsi_alloc: return NULL; } +/** + * ice_vsi_release_single_msix - Clear the queue to single IRQ mapping in HW + * @vsi: the VSI being cleaned up + * @vector_id: index of the vector in VSI + */ +static void ice_vsi_release_single_msix(struct ice_vsi *vsi, u16 vector_id) +{ + struct ice_q_vector *q_vector = vsi->q_vectors[vector_id]; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + u16 txq, rxq; + int q; + + ice_vsi_get_q_vector_q_base(vsi, vector_id, &txq, &rxq); + + ice_write_intrl(q_vector, 0); + for (q = 0; q < q_vector->num_ring_tx; q++) { + ice_write_itr(&q_vector->tx, 0); + wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); +#ifdef HAVE_XDP_SUPPORT + if (ice_is_xdp_ena_vsi(vsi)) { + u32 xdp_txq = (u32)txq + vsi->num_xdp_txq; + + wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); + } +#endif /* HAVE_XDP_SUPPORT */ + txq++; + } + + for (q = 0; q < q_vector->num_ring_rx; q++) { + ice_write_itr(&q_vector->rx, 0); + wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); + rxq++; + } + + ice_flush(hw); +} + /** * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW * @vsi: the VSI being cleaned up */ static void ice_vsi_release_msix(struct ice_vsi *vsi) { - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - u32 txq = 0; - u32 rxq = 0; - int i, q; + int i; - for (i = 0; i < vsi->num_q_vectors; i++) { - struct ice_q_vector *q_vector = vsi->q_vectors[i]; - - ice_write_intrl(q_vector, 0); - for (q = 0; q < q_vector->num_ring_tx; q++) { - ice_write_itr(&q_vector->tx, 0); - wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); -#ifdef HAVE_XDP_SUPPORT - if (ice_is_xdp_ena_vsi(vsi)) { - u32 xdp_txq = txq + vsi->num_xdp_txq; - - wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); - } -#endif /* HAVE_XDP_SUPPORT */ - txq++; - } - - for (q = 0; q < q_vector->num_ring_rx; q++) { - ice_write_itr(&q_vector->rx, 0); - wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); - rxq++; - } - } - - ice_flush(hw); + ice_for_each_q_vector(vsi, i) + ice_vsi_release_single_msix(vsi, i); } /** @@ -2822,11 +3094,13 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) return; vsi->irqs_ready = false; + ice_free_cpu_rx_rmap(vsi); + ice_for_each_q_vector(vsi, i) { u16 vector = i + base; int irq_num; - irq_num = pf->msix_entries[vector].vector; + irq_num = ice_get_irq_num(pf, vector); /* free only the irqs that were actually requested */ if (!vsi->q_vectors[i] || @@ -2835,7 +3109,8 @@ void 
ice_vsi_free_irq(struct ice_vsi *vsi) continue; /* clear the affinity notifier in the IRQ descriptor */ - irq_set_affinity_notifier(irq_num, NULL); + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) + irq_set_affinity_notifier(irq_num, NULL); /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(irq_num, NULL); @@ -2882,21 +3157,6 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi) */ void ice_vsi_close(struct ice_vsi *vsi) { - enum ice_close_reason reason = ICE_REASON_INTERFACE_DOWN; - - if (test_bit(ICE_CORER_REQ, vsi->back->state)) - reason = ICE_REASON_CORER_REQ; - if (test_bit(ICE_GLOBR_REQ, vsi->back->state)) - reason = ICE_REASON_GLOBR_REQ; - if (test_bit(ICE_PFR_REQ, vsi->back->state)) - reason = ICE_REASON_PFR_REQ; - if (!ice_is_safe_mode(vsi->back) && vsi->type == ICE_VSI_PF) { - int ret = ice_for_each_peer(vsi->back, &reason, ice_peer_close); - - if (ret) - dev_dbg(ice_pf_to_dev(vsi->back), "Peer device did not implement close function\n"); - } - if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) ice_down(vsi); ice_vsi_free_irq(vsi); @@ -2928,20 +3188,20 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked) if (!locked) rtnl_unlock(); } -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS if (err) return err; if (test_bit(ICE_FLAG_MACVLAN_ENA, vsi->back->flags) && !ice_is_adq_active(vsi->back)) err = ice_vsi_cfg_netdev_tc0(vsi); -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ } else if (vsi->type == ICE_VSI_CTRL) { err = ice_vsi_open_ctrl(vsi); -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS } else if (vsi->type == ICE_VSI_OFFLOAD_MACVLAN) { err = ice_vsi_open(vsi); -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ } return err; @@ -2973,10 +3233,10 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) } } else if (vsi->type == ICE_VSI_CTRL) { ice_vsi_close(vsi); -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS } else if (vsi->type == ICE_VSI_OFFLOAD_MACVLAN) { ice_vsi_close(vsi); -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { ice_vsi_close(vsi); } @@ -3036,7 +3296,7 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) return; ice_for_each_q_vector(vsi, i) - synchronize_irq(pf->msix_entries[i + base].vector); + synchronize_irq(ice_get_irq_num(pf, i + base)); } /** @@ -3054,6 +3314,37 @@ void ice_napi_del(struct ice_vsi *vsi) netif_napi_del(&vsi->q_vectors[v_idx]->napi); } +/** + * ice_free_vf_ctrl_res - Free the VF control VSI resource + * @pf: pointer to PF structure + * @vsi: the VSI to free resources for + * + * Check if the VF control VSI resource is still in use. If no VF is using it + * any more, release the VSI resource. Otherwise, leave it to be cleaned up + * once no other VF uses it. + */ +static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi) +{ + struct ice_vf *vf; + unsigned int bkt; + + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { + if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + + /* No other VFs left that have control VSI. It is now safe to reclaim + * SW interrupts back to the common pool. 
+ */ + ice_free_res(pf->irq_tracker, vsi->base_vector, + ICE_RES_VF_CTRL_VEC_ID); + pf->num_avail_sw_msix += vsi->num_q_vectors; +} + /** * ice_vsi_release - Delete a VSI and free its resources * @vsi: the VSI being removed @@ -3063,6 +3354,7 @@ void ice_napi_del(struct ice_vsi *vsi) int ice_vsi_release(struct ice_vsi *vsi) { struct ice_pf *pf; + int err; if (!vsi->back) return -ENODEV; @@ -3087,8 +3379,6 @@ int ice_vsi_release(struct ice_vsi *vsi) ice_rss_clean(vsi); /* Disable VSI and free resources */ - if (vsi->type != ICE_VSI_LB) - ice_vsi_dis_irq(vsi); ice_vsi_close(vsi); /* SR-IOV determines needed MSIX resources all at once instead of per @@ -3096,48 +3386,24 @@ int ice_vsi_release(struct ice_vsi *vsi) * many interrupts each VF needs. SR-IOV MSIX resources are also * cleared in the same manner. */ - if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) { - int i; - - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; - - if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) - break; - } - if (i == pf->num_alloc_vfs) { - /* No other VFs left that have control VSI, reclaim SW - * interrupts back to the common pool - */ - ice_free_res(pf->irq_tracker, vsi->base_vector, - ICE_RES_VF_CTRL_VEC_ID); - pf->num_avail_sw_msix += vsi->num_q_vectors; - } + if (vsi->type == ICE_VSI_CTRL && vsi->vf) { + ice_free_vf_ctrl_res(pf, vsi); } else if (vsi->type != ICE_VSI_VF) { /* reclaim SW interrupts back to the common pool */ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); pf->num_avail_sw_msix += vsi->num_q_vectors; } - if (!ice_is_safe_mode(pf)) { - if (vsi->type == ICE_VSI_PF) { - ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, - ICE_DROP_PACKET); - ice_cfg_sw_lldp(vsi, true, false); - /* The Rx rule will only exist to remove if the LLDP FW - * engine is currently stopped - */ - if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) - ice_cfg_sw_lldp(vsi, false, false); - } - } - ice_fltr_remove_all(vsi); ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); + err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); + if (err) + dev_info(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", + vsi->vsi_num, err); ice_vsi_delete(vsi); ice_vsi_free_q_vectors(vsi); - if (vsi->netdev) { + if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) { if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) { unregister_netdev(vsi->netdev); clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); @@ -3182,8 +3448,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi, ice_for_each_q_vector(vsi, i) { struct ice_q_vector *q_vector = vsi->q_vectors[i]; - coalesce[i].itr_tx = q_vector->tx.itr_setting; - coalesce[i].itr_rx = q_vector->rx.itr_setting; + coalesce[i].itr_tx = q_vector->tx.itr_settings; + coalesce[i].itr_rx = q_vector->rx.itr_settings; coalesce[i].intrl = q_vector->intrl; if (i < vsi->num_txq) @@ -3215,15 +3481,15 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, if ((size && !coalesce) || !vsi) return; - /* there are a couple of cases that have to be handled here: + /* There are a couple of cases that have to be handled here: * 1. The case where the number of queue vectors stays the same, but - * the number of tx or rx rings changes (the first for loop) + * the number of Tx or Rx rings changes (the first for loop) * 2. 
The case where the number of queue vectors increased (the * second for loop) */ for (i = 0; i < size && i < vsi->num_q_vectors; i++) { - /* there are 2 cases to handle here and they are the same for - * both TX and RX: + /* There are 2 cases to handle here and they are the same for + * both Tx and Rx: * if the entry was valid previously (coalesce[i].[tr]x_valid * and the loop variable is less than the number of rings * allocated, then write the previous values @@ -3239,26 +3505,26 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, */ if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { rc = &vsi->q_vectors[i]->rx; - rc->itr_setting = coalesce[i].itr_rx; + rc->itr_settings = coalesce[i].itr_rx; ice_write_itr(rc, rc->itr_setting); } else if (i < vsi->alloc_rxq) { rc = &vsi->q_vectors[i]->rx; - rc->itr_setting = coalesce[0].itr_rx; + rc->itr_settings = coalesce[0].itr_rx; ice_write_itr(rc, rc->itr_setting); } if (i < vsi->alloc_txq && coalesce[i].tx_valid) { rc = &vsi->q_vectors[i]->tx; - rc->itr_setting = coalesce[i].itr_tx; + rc->itr_settings = coalesce[i].itr_tx; ice_write_itr(rc, rc->itr_setting); } else if (i < vsi->alloc_txq) { rc = &vsi->q_vectors[i]->tx; - rc->itr_setting = coalesce[0].itr_tx; + rc->itr_settings = coalesce[0].itr_tx; ice_write_itr(rc, rc->itr_setting); } vsi->q_vectors[i]->intrl = coalesce[i].intrl; - ice_write_intrl(vsi->q_vectors[i], coalesce[i].intrl); + ice_set_q_vector_intrl(vsi->q_vectors[i]); } /* the number of queue vectors increased so write whatever is in @@ -3267,16 +3533,16 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, for (; i < vsi->num_q_vectors; i++) { /* transmit */ rc = &vsi->q_vectors[i]->tx; - rc->itr_setting = coalesce[0].itr_tx; + rc->itr_settings = coalesce[0].itr_tx; ice_write_itr(rc, rc->itr_setting); /* receive */ rc = &vsi->q_vectors[i]->rx; - rc->itr_setting = coalesce[0].itr_rx; + rc->itr_settings = coalesce[0].itr_rx; ice_write_itr(rc, rc->itr_setting); vsi->q_vectors[i]->intrl = coalesce[0].intrl; - ice_write_intrl(vsi->q_vectors[i], coalesce[0].intrl); + ice_set_q_vector_intrl(vsi->q_vectors[i]); } } @@ -3292,9 +3558,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_coalesce_stored *coalesce; int prev_num_q_vectors = 0; - struct ice_vf *vf = NULL; enum ice_vsi_type vtype; - enum ice_status status; struct ice_pf *pf; int ret, i; @@ -3303,8 +3567,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) pf = vsi->back; vtype = vsi->type; - if (vtype == ICE_VSI_VF) - vf = &pf->vf[vsi->vf_id]; + + if (WARN_ON(!vsi->vf && ice_vsi_requires_vf(vtype))) + return -EINVAL; ice_vsi_init_vlan_ops(vsi); @@ -3316,6 +3581,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); + ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); + if (ret) + dev_info(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", + vsi->vsi_num, ret); ice_vsi_free_q_vectors(vsi); /* SR-IOV determines needed MSIX resources all at once instead of per @@ -3341,10 +3610,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) ice_vsi_clear_rings(vsi); ice_vsi_free_arrays(vsi); ice_vsi_free_rss_global_lut(vsi); - if (vtype == ICE_VSI_VF) - ice_vsi_set_num_qs(vsi, vf->vf_id); - else - ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); + ice_vsi_set_num_qs(vsi, vsi->vf); ret = ice_vsi_alloc_arrays(vsi); if (ret < 0) @@ -3364,6 +3630,7 @@ int ice_vsi_rebuild(struct 
ice_vsi *vsi, bool init_vsi) switch (vtype) { case ICE_VSI_CTRL: case ICE_VSI_OFFLOAD_MACVLAN: + case ICE_VSI_ADI: case ICE_VSI_VMDQ2: case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: @@ -3371,7 +3638,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) if (ret) goto err_rings; - ret = ice_vsi_setup_vector_base(vsi); + ret = ice_vsi_setup_vector_base(vsi, 0); if (ret) goto err_vectors; @@ -3385,10 +3652,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) ice_vsi_map_rings_to_vectors(vsi); ice_vsi_reset_stats(vsi); - /* Perform an initial read of the statistics registers now to - * set the baseline before the VSI becomes operational. - */ - ice_update_eth_stats(vsi); #ifdef HAVE_XDP_SUPPORT if (ice_is_xdp_ena_vsi(vsi)) { vsi->num_xdp_txq = vsi->alloc_rxq; @@ -3405,6 +3668,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) */ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) ice_vsi_cfg_rss_lut_key(vsi); + + /* disable or enable CRC stripping */ + if (vsi->netdev) + ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features & + NETIF_F_RXFCS)); break; case ICE_VSI_VF: ret = ice_vsi_alloc_q_vectors(vsi); @@ -3451,22 +3719,22 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) #ifdef NETIF_F_HW_TC if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) - /* If MQPRIO ise set, means channel code path, hence for main + /* If MQPRIO is set, means channel code path, hence for main * VSI's, use TC as 1 */ - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); else #endif /* NETIF_F_HW_TC */ - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, - vsi->tc_cfg.ena_tc, max_txqs); - - if (status) { - dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n", - vsi->vsi_num, ice_stat_str(status)); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, + vsi->tc_cfg.ena_tc, max_txqs); + if (ret) { + dev_err(ice_pf_to_dev(pf), "VSI %d failed LAN queue config, error %d\n", + vsi->vsi_num, ret); if (init_vsi) { ret = -EIO; goto err_vectors; } else { + kfree(coalesce); return ice_schedule_reset(pf, ICE_RESET_PFR); } } @@ -3630,7 +3898,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) * * Prepares VSI tc_config to have queue configurations based on MQPRIO options. 
*/ -static void +static int ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, u8 ena_tc) { @@ -3679,7 +3947,18 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, /* Set actual Tx/Rx queue pairs */ vsi->num_txq = offset + qcount_tx; + if (vsi->num_txq > vsi->alloc_txq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", + vsi->num_txq, vsi->alloc_txq); + return -EINVAL; + } + vsi->num_rxq = offset + qcount_rx; + if (vsi->num_rxq > vsi->alloc_rxq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", + vsi->num_rxq, vsi->alloc_rxq); + return -EINVAL; + } /* Setup queue TC[0].qmap for given VSI context */ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); @@ -3697,6 +3976,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); dev_dbg(ice_pf_to_dev(vsi->back), "%s: all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", __func__, vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); + + return 0; } #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ @@ -3712,7 +3993,6 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_pf *pf = vsi->back; struct ice_vsi_ctx *ctx; - enum ice_status status; struct device *dev; int i, ret = 0; u8 num_tc = 0; @@ -3756,36 +4036,36 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) #ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO if (vsi->type == ICE_VSI_PF && test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) - ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); + ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); else - ice_vsi_setup_q_map(vsi, ctx); + ret = ice_vsi_setup_q_map(vsi, ctx); #else - ice_vsi_setup_q_map(vsi, ctx); + ret = ice_vsi_setup_q_map(vsi, ctx); #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ + if (ret) + goto out; + /* must to indicate which section of VSI context are being modified */ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); - status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); - if (status) { + ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); + if (ret) { dev_info(dev, "Failed VSI Update\n"); - ret = -EIO; goto out; } #ifdef NETIF_F_HW_TC if (vsi->type == ICE_VSI_PF && test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, - max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); else #endif /* NETIF_F_HW_TC */ - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, - vsi->tc_cfg.ena_tc, max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, + vsi->tc_cfg.ena_tc, max_txqs); - if (status) { - dev_err(dev, "VSI %d failed TC config, error %s\n", - vsi->vsi_num, ice_stat_str(status)); - ret = -EIO; + if (ret) { + dev_err(dev, "VSI %d failed TC config, error %d\n", + vsi->vsi_num, ret); goto out; } ice_vsi_update_q_map(vsi, ctx); @@ -3897,192 +4177,132 @@ void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes) u64_stats_update_end(&rx_ring->syncp); } -/** - * ice_status_to_errno - convert from enum ice_status to Linux errno - * @err: ice_status value to convert - */ -int ice_status_to_errno(enum ice_status err) -{ - switch (err) { - case ICE_SUCCESS: - return 0; - case ICE_ERR_DOES_NOT_EXIST: - return -ENOENT; - case ICE_ERR_OUT_OF_RANGE: - return -ENOTTY; - case ICE_ERR_PARAM: - return -EINVAL; - case ICE_ERR_NO_MEMORY: - return -ENOMEM; - case 
ICE_ERR_MAX_LIMIT: - return -EAGAIN; - default: - return -EINVAL; - } -} - - -/** - * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used - * @sw: switch to check if its default forwarding VSI is free - * - * Return true if the default forwarding VSI is already being used, else returns - * false signalling that it's available to use. - */ -bool ice_is_dflt_vsi_in_use(struct ice_sw *sw) -{ - return (sw->dflt_vsi && sw->dflt_vsi_ena); -} - /** * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI - * @sw: switch for the default forwarding VSI to compare against * @vsi: VSI to compare against default forwarding VSI * * If this VSI passed in is the default forwarding VSI then return true, else * return false */ -bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) +bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi) { - return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena); + return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL); } /** * ice_set_dflt_vsi - set the default forwarding VSI - * @sw: switch used to assign the default forwarding VSI * @vsi: VSI getting set as the default forwarding VSI on the switch * - * If the VSI passed in is already the default VSI and it's enabled just return - * success. + * If the VSI passed in is already the default VSI, just return success. * - * If there is already a default VSI on the switch and it's enabled then return - * -EEXIST since there can only be one default VSI per switch. - * - * Otherwise try to set the VSI passed in as the switch's default VSI and - * return the result. + * Otherwise try to set the VSI passed in as the switch's default VSI and + * return the result. */ -int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) +int ice_set_dflt_vsi(struct ice_vsi *vsi) { - enum ice_status status; struct device *dev; + int status; - if (!sw || !vsi) + if (!vsi) return -EINVAL; dev = ice_pf_to_dev(vsi->back); /* the VSI passed in is already the default VSI */ - if (ice_is_vsi_dflt_vsi(sw, vsi)) { + if (ice_is_vsi_dflt_vsi(vsi)) { dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n", vsi->vsi_num); return 0; } - /* another VSI is already the default VSI for this switch */ - if (ice_is_dflt_vsi_in_use(sw)) { - dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n", - sw->dflt_vsi->vsi_num); - return -EEXIST; - } - status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX); if (status) { - dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n", + vsi->vsi_num, status); return -EIO; } - sw->dflt_vsi = vsi; - sw->dflt_vsi_ena = true; - return 0; } /** * ice_clear_dflt_vsi - clear the default forwarding VSI - * @sw: switch used to clear the default VSI + * @vsi: VSI to remove from filter list * - * If the switch has no default VSI or it's not enabled then return error. + * If the VSI passed in is not the default VSI, just return success. * * Otherwise try to clear the default VSI and return the result. 
*/ -int ice_clear_dflt_vsi(struct ice_sw *sw) +int ice_clear_dflt_vsi(struct ice_vsi *vsi) { - struct ice_vsi *dflt_vsi; - enum ice_status status; struct device *dev; + int status; - if (!sw) + if (!vsi) return -EINVAL; - dev = ice_pf_to_dev(sw->pf); + dev = ice_pf_to_dev(vsi->back); - dflt_vsi = sw->dflt_vsi; + /* the VSI passed in is not a default VSI */ + if (!ice_is_vsi_dflt_vsi(vsi)) { + dev_dbg(dev, "VSI %d passed in is not the default forwarding VSI, nothing to do\n", + vsi->vsi_num); + return 0; + } - /* there is no default VSI configured */ - if (!ice_is_dflt_vsi_in_use(sw)) - return -ENODEV; - - status = ice_cfg_dflt_vsi(dflt_vsi->port_info, dflt_vsi->idx, false, + status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false, ICE_FLTR_RX); if (status) { - dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n", - dflt_vsi->vsi_num, ice_stat_str(status)); + dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n", + vsi->vsi_num, status); return -EIO; } - sw->dflt_vsi = NULL; - sw->dflt_vsi_ena = false; - return 0; } - /** * ice_get_link_speed_mbps - get link speed in Mbps * @vsi: the VSI whose link speed is being queried * - * Return current VSI link speed, else ICE_LINK_SPEED_UNKNOWN (0) is - * returned. + * Return current VSI link speed and 0 if the speed is unknown. */ int ice_get_link_speed_mbps(struct ice_vsi *vsi) { switch (vsi->port_info->phy.link_info.link_speed) { case ICE_AQ_LINK_SPEED_100GB: - return ICE_LINK_SPEED_100000MBPS; + return SPEED_100000; case ICE_AQ_LINK_SPEED_50GB: - return ICE_LINK_SPEED_50000MBPS; + return SPEED_50000; case ICE_AQ_LINK_SPEED_40GB: - return ICE_LINK_SPEED_40000MBPS; + return SPEED_40000; case ICE_AQ_LINK_SPEED_25GB: - return ICE_LINK_SPEED_25000MBPS; + return SPEED_25000; case ICE_AQ_LINK_SPEED_20GB: - return ICE_LINK_SPEED_20000MBPS; + return SPEED_20000; case ICE_AQ_LINK_SPEED_10GB: - return ICE_LINK_SPEED_10000MBPS; + return SPEED_10000; case ICE_AQ_LINK_SPEED_5GB: - return ICE_LINK_SPEED_5000MBPS; + return SPEED_5000; case ICE_AQ_LINK_SPEED_2500MB: - return ICE_LINK_SPEED_2500MBPS; + return SPEED_2500; case ICE_AQ_LINK_SPEED_1000MB: - return ICE_LINK_SPEED_1000MBPS; + return SPEED_1000; case ICE_AQ_LINK_SPEED_100MB: - return ICE_LINK_SPEED_100MBPS; + return SPEED_100; case ICE_AQ_LINK_SPEED_10MB: - return ICE_LINK_SPEED_10MBPS; + return SPEED_10; case ICE_AQ_LINK_SPEED_UNKNOWN: default: - return ICE_LINK_SPEED_UNKNOWN; + return 0; } } - /** * ice_get_link_speed_kbps - get link speed in Kbps * @vsi: the VSI whose link speed is being queried * - * Return current VSI link speed, else ICE_LINK_SPEED_UNKNOWN (0) is - * returned. + * Return current VSI link speed and 0 if the speed is unknown. 
*/ int ice_get_link_speed_kbps(struct ice_vsi *vsi) { @@ -4105,8 +4325,8 @@ int ice_get_link_speed_kbps(struct ice_vsi *vsi) int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; int speed; dev = ice_pf_to_dev(pf); @@ -4132,7 +4352,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n", min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n", @@ -4144,7 +4364,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) if (status) { dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n", ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n", @@ -4166,8 +4386,8 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; int speed; dev = ice_pf_to_dev(pf); @@ -4193,7 +4413,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n", max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n", @@ -4205,7 +4425,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) if (status) { dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n", ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n", @@ -4225,7 +4445,7 @@ int ice_set_link(struct ice_vsi *vsi, bool ena) struct device *dev = ice_pf_to_dev(vsi->back); struct ice_port_info *pi = vsi->port_info; struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; if (vsi->type != ICE_VSI_PF) return -EINVAL; @@ -4237,14 +4457,14 @@ int ice_set_link(struct ice_vsi *vsi, bool ena) * a success code. Return an error if FW returns an error code other * than ICE_AQ_RC_EMODE */ - if (status == ICE_ERR_AQ_ERROR) { + if (status == -EIO) { if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) - dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n", - (ena ? "ON" : "OFF"), ice_stat_str(status), + dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n", + (ena ? "ON" : "OFF"), status, ice_aq_str(hw->adminq.sq_last_status)); } else if (status) { - dev_err(dev, "can't set link to %s, err %s aq_err %s\n", - (ena ? "ON" : "OFF"), ice_stat_str(status), + dev_err(dev, "can't set link to %s, err %d aq_err %s\n", + (ena ? "ON" : "OFF"), status, ice_aq_str(hw->adminq.sq_last_status)); return -EIO; } @@ -4315,6 +4535,52 @@ void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx) ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; } +/** + * ice_check_mtu_valid - check if specified MTU can be set for a netdev + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 if MTU is valid, negative otherwise + */ +int ice_check_mtu_valid(struct net_device *netdev, int new_mtu) +{ +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + if (new_mtu < netdev->extended->min_mtu) { + netdev_err(netdev, "new MTU invalid. 
min_mtu is %d\n", + netdev->extended->min_mtu); + return -EINVAL; + } else if (new_mtu > netdev->extended->max_mtu) { + netdev_err(netdev, "new MTU invalid. max_mtu is %d\n", + netdev->extended->max_mtu); + return -EINVAL; + } +#else /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */ + if (new_mtu < (int)netdev->min_mtu) { + netdev_err(netdev, "new MTU invalid. min_mtu is %d\n", + netdev->min_mtu); + return -EINVAL; + } else if (new_mtu > (int)netdev->max_mtu) { + netdev_err(netdev, "new MTU invalid. max_mtu is %d\n", + netdev->max_mtu); + return -EINVAL; + } +#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */ +#else /* HAVE_NETDEVICE_MIN_MAX_MTU */ + if (new_mtu < ETH_MIN_MTU) { + netdev_err(netdev, "new MTU invalid. min_mtu is %d\n", + ETH_MIN_MTU); + return -EINVAL; + } else if (new_mtu > ICE_MAX_MTU) { + netdev_err(netdev, "new MTU invalid. max_mtu is %d\n", + ICE_MAX_MTU); + return -EINVAL; + } +#endif /* HAVE_NETDEVICE_MIN_MAX_MTU */ + + return 0; +} + /** * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI * @vsi: VSI used to add VLAN filters @@ -4382,6 +4648,14 @@ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi) if (err && err != -EEXIST) return err; + /* when deleting the last VLAN filter, make sure to disable the VLAN + * promisc mode so the filter isn't left by accident + */ + err = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, 0); + if (err) + return err; + return 0; } @@ -4397,9 +4671,14 @@ static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi) #define ICE_DVM_NUM_ZERO_VLAN_FLTRS 2 #define ICE_SVM_NUM_ZERO_VLAN_FLTRS 1 /* no VLAN 0 filter is created when a port VLAN is active */ - if (vsi->type == ICE_VSI_VF && - ice_vf_is_port_vlan_ena(&vsi->back->vf[vsi->vf_id])) - return 0; + if (vsi->type == ICE_VSI_VF) { + if (WARN_ON(!vsi->vf)) + return 0; + + if (ice_vf_is_port_vlan_ena(vsi->vf)) + return 0; + } + if (ice_is_dvm_ena(&vsi->back->hw)) return ICE_DVM_NUM_ZERO_VLAN_FLTRS; else @@ -4407,7 +4686,7 @@ static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi) } /** - * ice_vsi_has_non_zero_vlans - check is VSI has any non-zero VLANs + * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs * @vsi: VSI used to determine if any non-zero VLANs have been added */ bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi) @@ -4481,8 +4760,41 @@ void ice_init_feature_support(struct ice_pf *pf) case ICE_DEV_ID_E810_XXV_QSFP: case ICE_DEV_ID_E810_XXV_SFP: ice_set_feature_support(pf, ICE_F_DSCP); +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) + if (ice_is_phy_rclk_present(&pf->hw)) + ice_set_feature_support(pf, ICE_F_PHY_RCLK); + /* If we don't own the timer - don't enable other caps */ + if (!pf->hw.func_caps.ts_func_info.src_tmr_owned) + break; + + ice_set_feature_support(pf, ICE_F_PTP_EXTTS); + if (ice_is_clock_mux_present_e810t(&pf->hw)) + ice_set_feature_support(pf, ICE_F_SMA_CTRL); + if (ice_is_cgu_present(&pf->hw)) + ice_set_feature_support(pf, ICE_F_CGU); + if (ice_is_gps_present_e810t(&pf->hw) && + ice_gnss_is_gps_present(&pf->hw)) + ice_set_feature_support(pf, ICE_F_GNSS); + if (ice_is_e810t(&pf->hw)) + ice_set_feature_support(pf, ICE_F_FIXED_TIMING_PINS); +#endif /* CONFIG_PTP_1588_CLOCK */ + break; +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_SGMII: + case ICE_DEV_ID_E823C_QSFP: + if 
(ice_is_phy_rclk_present(&pf->hw)) + ice_set_feature_support(pf, ICE_F_PHY_RCLK); ice_set_feature_support(pf, ICE_F_PTP_EXTTS); break; +#endif /* CONFIG_PTP_1588_CLOCK */ default: break; } diff --git a/drivers/thirdparty/ice/ice_lib.h b/drivers/thirdparty/ice/ice_lib.h index 4331d2786d75..7e9d2a51e857 100644 --- a/drivers/thirdparty/ice/ice_lib.h +++ b/drivers/thirdparty/ice/ice_lib.h @@ -53,10 +53,13 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc); int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi); +int ice_get_valid_rss_size(struct ice_hw *hw, int new_size); +int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size); + struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, - enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch, - u8 tc); + enum ice_vsi_type vsi_type, struct ice_vf *vf, + struct ice_channel *ch, u8 tc); void ice_napi_del(struct ice_vsi *vsi); @@ -90,14 +93,14 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi); void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); +void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable); + void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); -int ice_status_to_errno(enum ice_status err); - void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, bool __maybe_unused ena_ts); @@ -109,15 +112,15 @@ irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data); void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); void ice_write_itr(struct ice_ring_container *rc, u16 itr); void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); +void ice_vsi_get_q_vector_q_base(struct ice_vsi *vsi, u16 vector_id, u16 *txq, + u16 *rxq); -enum ice_status -ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); +int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); bool ice_is_safe_mode(struct ice_pf *pf); -bool ice_is_peer_ena(struct ice_pf *pf); -bool ice_is_dflt_vsi_in_use(struct ice_sw *sw); -bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); -int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); -int ice_clear_dflt_vsi(struct ice_sw *sw); +bool ice_is_aux_ena(struct ice_pf *pf); +bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi); +int ice_set_dflt_vsi(struct ice_vsi *vsi); +int ice_clear_dflt_vsi(struct ice_vsi *vsi); int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate); int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate); int ice_get_link_speed_kbps(struct ice_vsi *vsi); @@ -130,6 +133,7 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx); #endif /* HAVE_METADATA_PORT_INFO */ void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx); void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx); +int ice_check_mtu_valid(struct net_device *netdev, int new_mtu); int ice_vsi_add_vlan_zero(struct ice_vsi *vsi); int ice_vsi_del_vlan_zero(struct ice_vsi *vsi); bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi); diff --git a/drivers/thirdparty/ice/ice_main.c b/drivers/thirdparty/ice/ice_main.c index 7151053ca28c..9df9e85bfb02 100644 --- a/drivers/thirdparty/ice/ice_main.c +++ b/drivers/thirdparty/ice/ice_main.c @@ -23,12 +23,13 @@ #include "ice_tc_lib.h" #include "ice_vsi_vlan_ops.h" #include "ice_fwlog.h" +#include "ice_irq.h" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 6 -#define DRV_VERSION_BUILD 7 +#define 
DRV_VERSION_MINOR 10 +#define DRV_VERSION_BUILD 1 -#define DRV_VERSION "1.6.7.1.1" +#define DRV_VERSION "1.10.1.2" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" #ifdef ICE_ADD_PROBES #define DRV_VERSION_EXTRA "_probes" @@ -62,8 +63,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); #endif /* !CONFIG_DYNAMIC_DEBUG */ - - static ushort fwlog_level = ICE_FWLOG_LEVEL_NONE; module_param(fwlog_level, ushort, 0644); MODULE_PARM_DESC(fwlog_level, "FW event level to log. All levels <= to the specified value are enabled. Values: 0=none, 1=error, 2=warning, 3=normal, 4=verbose. Invalid values: >=5\n"); @@ -105,12 +104,27 @@ static unsigned long fwlog_events; /* no enabled events by default */ module_param(fwlog_events, ulong, 0644); MODULE_PARM_DESC(fwlog_events, "FW events to log (32-bit mask)\n"); -static struct workqueue_struct *ice_wq; +/** + * ice_hw_to_dev - Get device pointer from the hardware structure + * @hw: pointer to the device HW structure + * + * Used to access the device pointer from compilation units which can't easily + * include the definition of struct ice_pf without leading to circular header + * dependencies. + */ +struct device *ice_hw_to_dev(struct ice_hw *hw) +{ + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); + + return &pf->pdev->dev; +} + +struct workqueue_struct *ice_wq; +struct workqueue_struct *ice_lag_wq; static const struct net_device_ops ice_netdev_recovery_ops; static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_ops; -static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); static void ice_vsi_release_all(struct ice_pf *pf); @@ -120,16 +134,15 @@ static int ice_rebuild_channels(struct ice_pf *pf); static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr); #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ - bool netif_is_ice(struct net_device *dev) { return dev && (dev->netdev_ops == &ice_netdev_ops); } -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS static void ice_deinit_macvlan(struct ice_vsi *vsi); -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #ifdef HAVE_TC_INDIR_BLOCK #if defined(HAVE_TC_FLOW_INDIR_DEV) && defined(HAVE_FLOW_INDIR_BLOCK_QDISC) static int @@ -221,13 +234,13 @@ static void ice_chnl_subtask_handle_interrupt(struct ice_pf *pf) static inline void ice_flush_vsi_fd_fltrs(struct ice_vsi *vsi) { struct device *dev = ice_pf_to_dev(vsi->back); - enum ice_status status; + int status; status = ice_clear_vsi_fd_table(&vsi->back->hw, vsi->vsi_num); if (status) - dev_err(dev, "Failed to clear FD table for %s, vsi_num: %u, status: %s\n", + dev_err(dev, "Failed to clear FD table for %s, vsi_num: %u, status: %d\n", ice_vsi_type_str(vsi->type), vsi->vsi_num, - ice_stat_str(status)); + status); } /** @@ -465,7 +478,6 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) */ static int ice_init_mac_fltr(struct ice_pf *pf) { - enum ice_status status; struct ice_vsi *vsi; u8 *perm_addr; @@ -474,11 +486,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf) return -EINVAL; perm_addr = vsi->port_info->mac.perm_addr; - status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); - if (status) - return -EIO; - - return 0; + return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); } /** @@ -518,6 +526,14 @@ static int 
ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + /* Under some circumstances, we might receive a request to delete our + * own device address from our uc list. Because we store the device + * address in the VSI's MAC filter list, we need to ignore such + * requests and not delete our device address from this list. + */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr, ICE_FWD_TO_VSI)) return -EINVAL; @@ -534,8 +550,7 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) { return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) || - test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) || - test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); + test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); } /** @@ -546,18 +561,22 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) */ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m) { - enum ice_status status; + int status; if (vsi->type != ICE_VSI_PF) return 0; - if (ice_vsi_has_non_zero_vlans(vsi)) - status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); - else - status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0, + if (ice_vsi_has_non_zero_vlans(vsi)) { + promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX); + status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, + promisc_m); + } else { + status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, + promisc_m, 0, vsi->port_info->lport); + } - if (status) + if (status && status != -EEXIST) return -EIO; return 0; @@ -571,23 +590,41 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m) */ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m) { - enum ice_status status; + int status; if (vsi->type != ICE_VSI_PF) return 0; - if (ice_vsi_has_non_zero_vlans(vsi)) - status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); - else - status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0, + if (ice_vsi_has_non_zero_vlans(vsi)) { + promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX); + status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, + promisc_m); + } else { + status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, + promisc_m, 0, vsi->port_info->lport); + } - if (status) - return -EIO; - - return 0; + return status; } +#if IS_ENABLED(CONFIG_NET_DEVLINK) +#ifdef HAVE_NDO_GET_DEVLINK_PORT +/** + * ice_get_devlink_port - Get devlink port from netdev + * @netdev: the netdevice structure + */ +static struct devlink_port *ice_get_devlink_port(struct net_device *netdev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + if (!ice_is_switchdev_running(pf)) + return NULL; + + return &pf->devlink_port; +} +#endif /* HAVE_NDO_GET_DEVLINK_PORT */ +#endif /* CONFIG_NET_DEVLINK */ /** * ice_vsi_sync_fltr - Update the VSI filter list to the HW @@ -603,10 +640,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) bool promisc_forced_on = false; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status = 0; u32 changed_flags = 0; - u8 promisc_m; - int err = 0; + int err; if (!vsi->netdev) return -EINVAL; @@ -623,7 +658,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) if (ice_vsi_fltr_changed(vsi)) { clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 
- clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); /* grab the netdev's addr_list_lock */ netif_addr_lock_bh(netdev); @@ -636,25 +670,23 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } /* Remove MAC addresses in the unsync list */ - status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); + err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); ice_fltr_free_list(dev, &vsi->tmp_unsync_list); - if (status) { + if (err) { netdev_err(netdev, "Failed to delete MAC filters\n"); /* if we failed because of alloc failures, just bail */ - if (status == ICE_ERR_NO_MEMORY) { - err = -ENOMEM; + if (err == -ENOMEM) goto out; - } } /* Add MAC addresses in the sync list */ - status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); + err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); ice_fltr_free_list(dev, &vsi->tmp_sync_list); /* If filter is added successfully or already exists, do not go into * 'if' condition and report it as error. Instead continue processing * rest of the function. */ - if (status && status != ICE_ERR_ALREADY_EXISTS) { + if (err && err != -EEXIST) { netdev_err(netdev, "Failed to add MAC filters\n"); /* If there is no more space for new umac filters, VSI * should go into promiscuous mode. There should be some @@ -667,36 +699,22 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n", vsi->vsi_num); } else { - err = -EIO; goto out; } } + err = 0; /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { if (vsi->current_netdev_flags & IFF_ALLMULTI) { - if (ice_vsi_has_non_zero_vlans(vsi)) - promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; - else - promisc_m = ICE_MCAST_PROMISC_BITS; - - err = ice_set_promisc(vsi, promisc_m); + err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS); if (err) { - netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n", - vsi->vsi_num); vsi->current_netdev_flags &= ~IFF_ALLMULTI; goto out_promisc; } } else { /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ - if (ice_vsi_has_non_zero_vlans(vsi)) - promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; - else - promisc_m = ICE_MCAST_PROMISC_BITS; - - err = ice_clear_promisc(vsi, promisc_m); + err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS); if (err) { - netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", - vsi->vsi_num); vsi->current_netdev_flags |= IFF_ALLMULTI; goto out_promisc; } @@ -707,33 +725,29 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) { clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); if (vsi->current_netdev_flags & IFF_PROMISC) { + /* Apply Rx filter rule to get traffic from wire */ - if (!ice_is_dflt_vsi_in_use(pf->first_sw)) { - err = ice_set_dflt_vsi(pf->first_sw, vsi); - if (err && err != -EEXIST) { - netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n", - err, vsi->vsi_num); - vsi->current_netdev_flags &= - ~IFF_PROMISC; - goto out_promisc; - } - vlan_ops->dis_rx_filtering(vsi); + err = ice_set_dflt_vsi(vsi); + if (err) { + netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n", + err, vsi->vsi_num); + vsi->current_netdev_flags &= ~IFF_PROMISC; + goto out_promisc; } + vlan_ops->dis_rx_filtering(vsi); } else { /* Clear Rx filter to remove traffic from wire */ - if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) { - err = ice_clear_dflt_vsi(pf->first_sw); - if (err) { - netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n", - err, vsi->vsi_num); - vsi->current_netdev_flags 
|= - IFF_PROMISC; - goto out_promisc; - } - if (vsi->current_netdev_flags & - NETIF_F_HW_VLAN_CTAG_FILTER) - vlan_ops->ena_rx_filtering(vsi); + err = ice_clear_dflt_vsi(vsi); + if (err) { + netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n", + err, vsi->vsi_num); + vsi->current_netdev_flags |= + IFF_PROMISC; + goto out_promisc; } + if (vsi->current_netdev_flags & + NETIF_F_HW_VLAN_CTAG_FILTER) + vlan_ops->ena_rx_filtering(vsi); } } goto exit; @@ -792,12 +806,55 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++) pf->vf_agg_node[node].num_vsis = 0; -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS for (node = 0; node < ICE_MAX_MACVLAN_AGG_NODES; node++) pf->macvlan_agg_node[node].num_vsis = 0; #endif } +#ifdef HAVE_UDP_TUNNEL_NIC_INFO +static int ice_udp_tunnel_add(struct net_device *netdev, unsigned int table, + unsigned int idx, struct udp_tunnel_info *ti); +static int ice_udp_tunnel_del(struct net_device *netdev, unsigned int table, + unsigned int idx, struct udp_tunnel_info *ti); + +static void ice_udp_tunnel_prepare(struct ice_pf *pf) +{ + int vxlan_cnt = 0, geneve_cnt = 0, i; + struct ice_hw *hw; + + hw = &pf->hw; + for (i = 0; i < hw->tnl.count; i++) { + if (hw->tnl.tbl[i].boost_entry) { + if (hw->tnl.tbl[i].type == TNL_VXLAN) + vxlan_cnt++; + else if (hw->tnl.tbl[i].type == TNL_GENEVE) + geneve_cnt++; + } + } + + pf->udp_tunnel_nic.set_port = ice_udp_tunnel_add; + pf->udp_tunnel_nic.unset_port = ice_udp_tunnel_del; + pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; +#ifdef HAVE_UDP_TUNNEL_NIC_SHARED + pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared; +#endif /* HAVE_UDP_TUNNEL_NIC_SHARED */ + i = 0; + if (vxlan_cnt) { + pf->udp_tunnel_nic.tables[i].n_entries = vxlan_cnt; + pf->udp_tunnel_nic.tables[i].tunnel_types = + UDP_TUNNEL_TYPE_VXLAN; + i++; + } + if (geneve_cnt) { + pf->udp_tunnel_nic.tables[i].n_entries = + geneve_cnt; + pf->udp_tunnel_nic.tables[i].tunnel_types = + UDP_TUNNEL_TYPE_GENEVE; + } +} +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ + #ifdef HAVE_TC_SETUP_CLSFLOWER /** * ice_remove_tc_fltrs - clear TC filters configuration @@ -880,7 +937,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) #ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO struct ice_vsi *vsi; #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ - unsigned int i; + struct ice_vf *vf; + unsigned int bkt; dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type); @@ -888,13 +946,21 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) return; + ice_unplug_aux_devs(pf); + /* Notify VFs of impending reset */ if (ice_check_sq_alive(hw, &hw->mailboxq)) ice_vc_notify_reset(pf); - /* Disable VFs until reset is completed */ - ice_for_each_vf(pf, i) - ice_set_vf_state_qs_dis(&pf->vf[i]); + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) { + /* Disable VFs until reset is completed */ + ice_set_vf_state_qs_dis(vf); + + if (vf->vf_ops->clear_reset_state) + vf->vf_ops->clear_reset_state(vf); + } + mutex_unlock(&pf->vfs.table_lock); #ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO /* release ADQ specific HW and SW resources */ @@ -902,7 +968,7 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) if (!vsi) goto skip; - /* to be on safer size, reset orig_rss_size so that normal flow + /* to be on safe side, reset orig_rss_size so that normal flow * of deciding rss_size can take precedence */ vsi->orig_rss_size = 0; @@ -914,8 
+980,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) } else { ice_remove_q_channels(vsi, true); - /* for other reset type, do not support "rebuild - * of channel, hence reset needed info + /* for other reset type, do not support channel rebuild + * hence reset needed info */ vsi->old_ena_tc = 0; vsi->all_enatc = 0; @@ -943,18 +1009,20 @@ skip: /* disable the VSIs and their queues that are not already DOWN */ ice_pf_dis_all_vsi(pf, false); - if (test_bit(ICE_FLAG_PTP_ENA, pf->flags)) - ice_ptp_release(pf); + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) + ice_ptp_prepare_for_reset(pf); + + if (ice_is_feature_supported(pf, ICE_F_GNSS)) + ice_gnss_exit(pf); if (hw->port_info) ice_sched_clear_port(hw->port_info); - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, false); set_bit(ICE_PREPARED_FOR_RESET, pf->state); } - /** * ice_print_recovery_msg - print recovery mode message * @dev: pointer to the device instance @@ -1011,7 +1079,9 @@ static void ice_remove_recovery_mode(struct ice_pf *pf) ice_reset(&pf->hw, ICE_RESET_PFR); pci_disable_pcie_error_reporting(pf->pdev); +#ifndef HAVE_DEVLINK_NOTIFY_REGISTER ice_devlink_unregister(pf); +#endif /* !HAVE_DEVLINK_NOTIFY_REGISTER */ } /** @@ -1121,7 +1191,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) clear_bit(ICE_PREPARED_FOR_RESET, pf->state); clear_bit(ICE_PFR_REQ, pf->state); wake_up(&pf->reset_wait_queue); - ice_reset_all_vfs(pf, true); + ice_reset_all_vfs(pf); } } @@ -1154,9 +1224,7 @@ static void ice_reset_subtask(struct ice_pf *pf) /* return if no valid reset type requested */ if (reset_type == ICE_RESET_INVAL) return; - if (ice_is_peer_ena(pf)) - ice_for_each_peer(pf, &reset_type, - ice_close_peer_for_reset); + ice_prepare_for_reset(pf, reset_type); /* make sure we are ready to rebuild */ @@ -1173,8 +1241,6 @@ static void ice_reset_subtask(struct ice_pf *pf) return; } - - /* came out of reset. 
check if an NVM rollback happened */ if (ice_get_fw_mode(&pf->hw) == ICE_FW_MODE_ROLLBACK) ice_print_rollback_msg(&pf->hw); @@ -1191,7 +1257,7 @@ static void ice_reset_subtask(struct ice_pf *pf) clear_bit(ICE_CORER_REQ, pf->state); clear_bit(ICE_GLOBR_REQ, pf->state); wake_up(&pf->reset_wait_queue); - ice_reset_all_vfs(pf, true); + ice_reset_all_vfs(pf); return; } @@ -1213,7 +1279,6 @@ static void ice_reset_subtask(struct ice_pf *pf) } } - /** * ice_sync_udp_fltr_subtask - sync the VSI filter list with HW * @pf: board private structure @@ -1256,12 +1321,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) { struct ice_aqc_get_phy_caps_data *caps; const char *an_advertised; - enum ice_status status; const char *fec_req; const char *speed; const char *fec; const char *fc; const char *an; + int status; if (!vsi) return; @@ -1408,7 +1473,6 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) } } - /** * ice_set_dflt_mib - send a default config MIB to the FW * @pf: private PF struct @@ -1424,9 +1488,9 @@ static void ice_set_dflt_mib(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); u8 mib_type, *buf, *lldpmib = NULL; - u16 len, typelen, offset = 0; struct ice_lldp_org_tlv *tlv; struct ice_hw *hw = &pf->hw; + u16 len, typelen, offset; u32 ouisubtype; mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; @@ -1455,7 +1519,7 @@ static void ice_set_dflt_mib(struct ice_pf *pf) */ buf[5] = 0x64; len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; - offset += len + 2; + offset = len + 2; tlv = (struct ice_lldp_org_tlv *) ((char *)tlv + sizeof(tlv->typelen) + len); @@ -1497,6 +1561,29 @@ static void ice_set_dflt_mib(struct ice_pf *pf) kfree(lldpmib); } +/** + * ice_check_phy_fw_load - check if PHY FW load failed + * @pf: pointer to PF struct + * @link_cfg_err: bitmap from the link info structure + * + * check if external PHY FW load failed and print an error message if it did + */ +static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err) +{ + if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) { + clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); + return; + } + + if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags)) + return; + + if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) { + dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. 
Please download and install the latest NVM for your device and try again\n"); + set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); + } +} + /** * ice_check_module_power * @pf: pointer to PF struct @@ -1529,6 +1616,20 @@ static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err) } } +/** + * ice_check_link_cfg_err - check if link configuration failed + * @pf: pointer to the PF struct + * @link_cfg_err: bitmap from the link info structure + * + * print if any link configuration failure happens due to the value in the + * link_cfg_err parameter in the link info structure + */ +static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err) +{ + ice_check_module_power(pf, link_cfg_err); + ice_check_phy_fw_load(pf, link_cfg_err); +} + /** * ice_link_event - process the link event * @pf: PF that the link event is associated with @@ -1544,10 +1645,11 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, { struct device *dev = ice_pf_to_dev(pf); struct ice_phy_info *phy_info; - enum ice_status status; + struct iidc_event *iev; struct ice_vsi *vsi; u16 old_link_speed; bool old_link; + int status; phy_info = &pi->phy; phy_info->link_info_old = phy_info->link_info; @@ -1560,11 +1662,11 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, */ status = ice_update_link_info(pi); if (status) - dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n", - pi->lport, ice_stat_str(status), + dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n", + pi->lport, status, ice_aq_str(pi->hw->adminq.sq_last_status)); - ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); + ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); /* Check if the link state is up after updating link info, and treat * this event as an UP event since the link is actually UP now. 
@@ -1572,6 +1674,15 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (phy_info->link_info.link_info & ICE_AQ_LINK_UP) link_up = true; + iev = kzalloc(sizeof(*iev), GFP_KERNEL); + if (!iev) + return -ENOMEM; + + set_bit(IIDC_EVENT_LINK_CHNG, iev->type); + iev->info.link_up = link_up; + ice_send_event_to_auxs(pf, iev); + kfree(iev); + vsi = ice_get_main_vsi(pf); if (!vsi || !vsi->port_info) return -EINVAL; @@ -1583,27 +1694,23 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, ice_set_link(vsi, false); } - /* if the old link up/down and speed is the same as the new */ if (link_up == old_link && link_speed == old_link_speed) return 0; - if (!ice_is_e810(&pf->hw)) - ice_ptp_link_change(pf, pf->hw.pf_id, link_up); + ice_ptp_link_change(pf, pf->hw.pf_id, link_up); - if (ice_is_dcb_active(pf)) { - if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) - ice_dcb_rebuild(pf); - } else { + /* Need to check if number of TC > 1 or any PFC enabled */ + if (test_bit(ICE_FLAG_DCB_ENA, pf->flags) || + pf->hw.port_info->qos_cfg.local_dcbx_cfg.pfc.pfcena) + ice_dcb_rebuild(pf); + else if (link_up) ice_set_dflt_mib(pf); - } ice_vsi_link_event(vsi, link_up); ice_print_link_msg(vsi, link_up); ice_vc_notify_link_state(pf); - - return 0; } @@ -1626,8 +1733,6 @@ static void ice_watchdog_subtask(struct ice_pf *pf) return; pf->serv_tmr_prev = jiffies; - if (!ice_is_e810(&pf->hw)) - ice_ptp_set_timestamp_offsets(pf); /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to */ @@ -1655,7 +1760,8 @@ static int ice_init_link_events(struct ice_port_info *pi) } mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA | - ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL)); + ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL | + ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL)); if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n", @@ -1693,13 +1799,12 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) !!(link_data->link_info & ICE_AQ_LINK_UP), le16_to_cpu(link_data->link_speed)); if (status) - dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", - status); + ice_dev_dbg_errno(ice_pf_to_dev(pf), status, + "Could not process link event"); return status; } - /** * ice_print_health_status_string - Print message for given FW health event * @pf: pointer to the PF structure @@ -1872,6 +1977,11 @@ ice_print_health_status_string(struct ice_pf *pf, netdev_err(netdev, "Possible Solution: Update to the latest NVM image.\n"); netdev_err(netdev, "Port Number: %d.\n", internal_data1); break; + case ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD: + netdev_err(netdev, "Failed to load the firmware image in the external PHY.\n"); + netdev_err(netdev, "Possible Solution: Update to the latest NVM image.\n"); + netdev_err(netdev, "Port Number: %d.\n", internal_data1); + break; default: break; } @@ -1911,7 +2021,6 @@ static void ice_process_health_status_event(struct ice_pf *pf, } } - enum ice_aq_task_state { ICE_AQ_TASK_WAITING = 0, ICE_AQ_TASK_COMPLETE, @@ -2163,15 +2272,15 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) return 0; do { - enum ice_status ret; u16 opcode; + int ret; ret = ice_clean_rq_elem(hw, cq, &event, &pending); - if (ret == ICE_ERR_AQ_NO_WORK) + if (ret == -EALREADY) break; if (ret) { - dev_err(dev, "%s Receive Queue event error %s\n", qtype, - ice_stat_str(ret)); + dev_err(dev, "%s Receive Queue event error 
%d\n", qtype, + ret); break; } @@ -2201,6 +2310,11 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) case ice_aqc_opc_get_health_status: ice_process_health_status_event(pf, &event); break; +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) + case ice_aqc_opc_event_cgu_err: + ice_ptp_process_cgu_err(&pf->hw, &event); + break; +#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ default: dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", qtype, opcode); @@ -2395,7 +2509,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - unsigned int i; + struct ice_vf *vf; + unsigned int bkt; u32 reg; if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { @@ -2421,6 +2536,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); +#ifdef HAVE_DEVLINK_HEALTH + ice_devlink_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, + vf_num, event, queue); +#endif /* HAVE_DEVLINK_HEALTH */ wr32(hw, GL_MDET_TX_PQM, 0xffffffff); } @@ -2438,6 +2557,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); +#ifdef HAVE_DEVLINK_HEALTH + ice_devlink_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, + vf_num, event, queue); +#endif /* HAVE_DEVLINK_HEALTH */ wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); } @@ -2455,6 +2578,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_rx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); +#ifdef HAVE_DEVLINK_HEALTH + ice_devlink_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, + vf_num, event, queue); +#endif /* HAVE_DEVLINK_HEALTH */ wr32(hw, GL_MDET_RX, 0xffffffff); } @@ -2483,47 +2610,46 @@ static void ice_handle_mdd_event(struct ice_pf *pf) /* Check to see if one of the VFs caused an MDD event, and then * increment counters and set print pending */ - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; - - reg = rd32(hw, VP_MDET_TX_PQM(i)); + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) { + reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id)); if (reg & VP_MDET_TX_PQM_VALID_M) { - wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); + wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF); vf->mdd_tx_events.count++; set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", - i); + vf->vf_id); } - reg = rd32(hw, VP_MDET_TX_TCLAN(i)); + reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id)); if (reg & VP_MDET_TX_TCLAN_VALID_M) { - wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); + wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF); vf->mdd_tx_events.count++; set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", - i); + vf->vf_id); } - reg = rd32(hw, VP_MDET_TX_TDPU(i)); + reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id)); if (reg & VP_MDET_TX_TDPU_VALID_M) { - wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); + wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF); vf->mdd_tx_events.count++; set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", - i); + vf->vf_id); } - reg = rd32(hw, VP_MDET_RX(i)); + reg = rd32(hw, 
VP_MDET_RX(vf->vf_id)); if (reg & VP_MDET_RX_VALID_M) { - wr32(hw, VP_MDET_RX(i), 0xFFFF); + wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF); vf->mdd_rx_events.count++; set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_rx_err(pf)) dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", - i); + vf->vf_id); /* Since the queue is disabled on VF Rx MDD events, the * PF can be configured to reset the VF through ethtool @@ -2534,10 +2660,11 @@ static void ice_handle_mdd_event(struct ice_pf *pf) * reset, so print the event prior to reset. */ ice_print_vf_rx_mdd_event(vf); - ice_reset_vf(&pf->vf[i], false); + ice_reset_vf(vf, ICE_VF_RESET_LOCK); } } } + mutex_unlock(&pf->vfs.table_lock); ice_print_vfs_mdd_events(pf); } @@ -2628,19 +2755,16 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi) { struct ice_aqc_get_phy_caps_data *pcaps; struct ice_pf *pf = pi->hw->back; - enum ice_status status; - int err = 0; + int err; pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps, - NULL); - - if (status) { + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, + pcaps, NULL); + if (err) { dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); - err = -EIO; goto out; } @@ -2739,8 +2863,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) struct ice_aqc_get_phy_caps_data *pcaps; struct ice_phy_info *phy = &pi->phy; struct ice_pf *pf = pi->hw->back; - enum ice_status status; - int err = 0; + int err; if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) return -EIO; @@ -2750,14 +2873,13 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) return -ENOMEM; if (ice_fw_supports_report_dflt_cfg(pi->hw)) - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, - pcaps, NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + pcaps, NULL); else - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - pcaps, NULL); - if (status) { + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + pcaps, NULL); + if (err) { dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); - err = -EIO; goto err_out; } @@ -2811,8 +2933,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) struct ice_aqc_set_phy_cfg_data *cfg; struct ice_phy_info *phy = &pi->phy; struct ice_pf *pf = vsi->back; - enum ice_status status; - int err = 0; + int err; /* Ensure we have media as we cannot configure a medialess port */ if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) @@ -2832,12 +2953,11 @@ static int ice_configure_phy(struct ice_vsi *vsi) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, - NULL); - if (status) { - dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", - vsi->vsi_num, ice_stat_str(status)); - err = -EIO; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, + NULL); + if (err) { + dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n", + vsi->vsi_num, err); goto done; } @@ -2851,15 +2971,14 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Use PHY topology as baseline for configuration */ memset(pcaps, 0, sizeof(*pcaps)); if (ice_fw_supports_report_dflt_cfg(pi->hw)) - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, - pcaps, NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + pcaps, NULL); else - status = ice_aq_get_phy_caps(pi, 
false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - pcaps, NULL); - if (status) { - dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n", - vsi->vsi_num, ice_stat_str(status)); - err = -EIO; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + pcaps, NULL); + if (err) { + dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n", + vsi->vsi_num, err); goto done; } @@ -2896,8 +3015,12 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* FEC */ ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req); - /* Can't provide what was requested; use PHY capabilities */ - if (cfg->link_fec_opt != + /* Can't provide what was requested; use PHY capabilities. + * The user can force FEC disabled Auto mode via ethtool private + * flag allow-no-fec-modules-in-auto, so allow ICE_FEC_DIS_AUTO. + */ + if (phy->curr_user_fec_req != ICE_FEC_DIS_AUTO && + cfg->link_fec_opt != (cfg->link_fec_opt & pcaps->link_fec_options)) { cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; cfg->link_fec_opt = pcaps->link_fec_options; @@ -2911,12 +3034,10 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Enable link and link update */ cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; - status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); - if (status) { - dev_err(dev, "Failed to set phy config, VSI %d error %s\n", - vsi->vsi_num, ice_stat_str(status)); - err = -EIO; - } + err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); + if (err) + dev_err(dev, "Failed to set phy config, VSI %d error %d\n", + vsi->vsi_num, err); kfree(cfg); done: @@ -2951,7 +3072,7 @@ static void ice_check_media_subtask(struct ice_pf *pf) if (err) return; - ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); + ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) @@ -2974,7 +3095,6 @@ static void ice_check_media_subtask(struct ice_pf *pf) } } - /** * ice_find_tnl - return the matching tunnel entry if it exists * @pf: pointer to PF struct @@ -3027,7 +3147,7 @@ static void ice_handle_tunnel(struct ice_pf *pf) struct ice_tnl_entry *entry, *tmp; LIST_HEAD(tmp_del_list); LIST_HEAD(tmp_add_list); - enum ice_status status; + int status; if (list_empty(&pf->tnl_list)) return; @@ -3061,12 +3181,12 @@ static void ice_handle_tunnel(struct ice_pf *pf) struct ice_tnl_entry *tnl; status = ice_create_tunnel(&pf->hw, entry->type, entry->port); - if (status == ICE_ERR_OUT_OF_RANGE) + if (status == -EIO) dev_dbg(dev, "Max tunneled UDP ports reached, port %d not added\n", entry->port); else if (status) - dev_err(dev, "Error adding UDP tunnel - %s for tnl port %u\n", - ice_stat_str(status), entry->port); + dev_err(dev, "Error adding UDP tunnel - %d for tnl port %u\n", + status, entry->port); /* delete entry from original tunnel list if failed to add, * otherwise set state to ACTIVE @@ -3113,6 +3233,35 @@ static void ice_service_task(struct work_struct *work) ice_service_task_complete(pf); return; } + if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { + struct iidc_event *event; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (event) { + set_bit(IIDC_EVENT_CRIT_ERR, event->type); + /* report the entire OICR value to AUX driver */ + pf->oicr_err_reg = event->info.reg; + ice_send_event_to_auxs(pf, event); + kfree(event); + } + } + if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { + struct iidc_event *event; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (event) { + set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, 
event->type); + ice_send_event_to_auxs(pf, event); + kfree(event); + } + } + + if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags)) + ice_unplug_aux_devs(pf); + + if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) { + ice_plug_aux_devs(pf); + } /* If we are in FW recovery mode, need to exit service tasks here */ if (test_bit(ICE_RECOVERY_MODE, pf->state)) @@ -3129,17 +3278,12 @@ static void ice_service_task(struct work_struct *work) ice_service_task_complete(pf); return; } - - /* Invoke remaining initialization of peer_objs */ - ice_for_each_peer(pf, NULL, ice_finish_init_peer_obj); - ice_chnl_subtask_handle_interrupt(pf); ice_channel_sync_global_cntrs(pf); ice_process_vflr_event(pf); ice_sync_udp_fltr_subtask(pf); ice_clean_mailboxq_subtask(pf); ice_clean_sbq_subtask(pf); - ice_clean_ptp_subtask(pf); ice_sync_arfs_fltrs(pf); ice_flush_fdir_ctx(pf); @@ -3264,6 +3408,79 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi) return 0; } +/** + * ice_vsi_req_single_irq_msix - get a single MSI-X vector from the OS for VSI + * @vsi: the VSI being configured + * @basename: name for the vector + * @vector_id: index of the vector in VSI + */ +static +int ice_vsi_req_single_irq_msix(struct ice_vsi *vsi, char *basename, + u16 vector_id) +{ + struct ice_q_vector *q_vector = vsi->q_vectors[vector_id]; + struct ice_pf *pf = vsi->back; + int base = vsi->base_vector; + u16 rx_irq_idx, tx_irq_idx; + struct device *dev; + int irq_num, err; + + dev = ice_pf_to_dev(pf); + irq_num = ice_get_irq_num(pf, base + vector_id); + + ice_vsi_get_q_vector_q_base(vsi, vector_id, &tx_irq_idx, &rx_irq_idx); + + if (q_vector->tx.ring && q_vector->rx.ring) { + if (q_vector->num_ring_rx == 1) { + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-%s-%u", basename, "TxRx", + rx_irq_idx); + } else { + u32 num_rx = q_vector->num_ring_rx; + + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-%s-%u-%u", basename, "TxRx", + rx_irq_idx, rx_irq_idx + num_rx - 1); + } + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-%s-%u", basename, "rx", rx_irq_idx); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-%s-%u", basename, "tx", tx_irq_idx); + } else { + /* skip this unused q_vector */ + return 0; + } + if (vsi->type == ICE_VSI_CTRL && vsi->vf) + err = devm_request_irq(dev, irq_num, vsi->irq_handler, + IRQF_SHARED, q_vector->name, + q_vector); + else + err = devm_request_irq(dev, irq_num, vsi->irq_handler, + 0, q_vector->name, q_vector); + if (err) { + netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", + err); + return err; + } + + /* register for affinity change notifications */ + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { + struct irq_affinity_notify *affinity_notify; + + affinity_notify = &q_vector->affinity_notify; + affinity_notify->notify = ice_irq_affinity_notify; + affinity_notify->release = ice_irq_affinity_release; + irq_set_affinity_notifier(irq_num, affinity_notify); + } + + /* assign the mask for this irq */ + irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); + + return 0; +} + /** * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI * @vsi: the VSI being configured @@ -3271,60 +3488,24 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi) */ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) { - int q_vectors = vsi->num_q_vectors; struct ice_pf *pf = vsi->back; int base = vsi->base_vector; struct device *dev; - int rx_int_idx = 0; - int tx_int_idx = 0; int vector, err; int 
irq_num; dev = ice_pf_to_dev(pf); - for (vector = 0; vector < q_vectors; vector++) { - struct ice_q_vector *q_vector = vsi->q_vectors[vector]; - - irq_num = pf->msix_entries[base + vector].vector; - - if (q_vector->tx.ring && q_vector->rx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-%s-%d", basename, "TxRx", rx_int_idx++); - tx_int_idx++; - } else if (q_vector->rx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-%s-%d", basename, "rx", rx_int_idx++); - } else if (q_vector->tx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-%s-%d", basename, "tx", tx_int_idx++); - } else { - /* skip this unused q_vector */ - continue; - } - if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) - err = devm_request_irq(dev, irq_num, vsi->irq_handler, - IRQF_SHARED, q_vector->name, - q_vector); - else - err = devm_request_irq(dev, irq_num, vsi->irq_handler, - 0, q_vector->name, q_vector); - if (err) { - netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", - err); + ice_for_each_q_vector(vsi, vector) { + err = ice_vsi_req_single_irq_msix(vsi, basename, vector); + if (err) goto free_q_irqs; - } + } - /* register for affinity change notifications */ - if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { - struct irq_affinity_notify *affinity_notify; - - affinity_notify = &q_vector->affinity_notify; - affinity_notify->notify = ice_irq_affinity_notify; - affinity_notify->release = ice_irq_affinity_release; - irq_set_affinity_notifier(irq_num, affinity_notify); - } - - /* assign the mask for this irq */ - irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); + err = ice_set_cpu_rx_rmap(vsi); + if (err) { + netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n", + vsi->vsi_num, ERR_PTR(err)); + goto free_q_irqs; } vsi->irqs_ready = true; @@ -3333,7 +3514,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) free_q_irqs: while (vector) { vector--; - irq_num = pf->msix_entries[base + vector].vector; + irq_num = ice_get_irq_num(pf, base + vector); if (!IS_ENABLED(CONFIG_RFS_ACCEL)) irq_set_affinity_notifier(irq_num, NULL); irq_set_affinity_hint(irq_num, NULL); @@ -3374,7 +3555,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) goto free_xdp_rings; ice_set_ring_xdp(xdp_ring); #ifdef HAVE_AF_XDP_ZC_SUPPORT - xdp_ring->xsk_pool = ice_xsk_umem(xdp_ring); + xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring); #endif /* HAVE_AF_XDP_ZC_SUPPORT */ } @@ -3398,14 +3579,13 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) int i; old_prog = xchg(&vsi->xdp_prog, prog); - if (old_prog) + if (old_prog && old_prog != prog) bpf_prog_put(old_prog); ice_for_each_rxq(vsi, i) WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); } -#ifdef HAVE_XDP_SUPPORT #ifdef ICE_ADD_PROBES /** * ice_clear_xdp_stats - clear all Rx XDP statistics on VSI @@ -3423,7 +3603,120 @@ static void ice_clear_xdp_stats(struct ice_vsi *vsi) } } #endif /* ICE_ADD_PROBES */ -#endif /* HAVE_XDP_SUPPORT */ + +/** + * ice_ch_vsi_map_xdp_rings_to_vecs - map channel VSIs q_vectors to + * xdp rings based on qps_per_poller settings. 
+ * @vsi: VSI to operate on + */ +static void ice_ch_vsi_map_xdp_rings_to_vecs(struct ice_vsi *vsi) +{ + struct ice_channel *ch; + u8 qps_per_poller; + + list_for_each_entry(ch, &vsi->ch_list, list) { + struct ice_q_vector *ch_q_vector = NULL; + struct ice_vsi *ch_vsi = ch->ch_vsi; + u16 num_qps = ch_vsi->num_txq; + int i; + + ch_vsi->num_xdp_txq = num_qps; + + qps_per_poller = ch->qps_per_poller; + if (qps_per_poller <= 1) + continue; + + for (i = 0; i < num_qps; i++) { + struct ice_ring *xdp_ring; + + xdp_ring = vsi->xdp_rings[ch->base_q + i]; + if (!xdp_ring || !xdp_ring->q_vector) + continue; + + if (!ch_q_vector || (i % qps_per_poller == 0)) { + ch_q_vector = xdp_ring->q_vector; + continue; + } + + xdp_ring->q_vector = ch_q_vector; + xdp_ring->next = ch_q_vector->tx.ring; + ch_q_vector->tx.ring = xdp_ring; + } + } +} + +/** + * ice_ch_vsi_map_rings_to_vecs - map channel VSIs q_vectors to + * tx/rx rings based on the qps_per_poller settings. + * @vsi: VSI to operate on + */ +static void ice_ch_vsi_map_rings_to_vecs(struct ice_vsi *vsi) +{ + struct ice_channel *ch; + u8 qps_per_poller; + + list_for_each_entry(ch, &vsi->ch_list, list) { + struct ice_q_vector *ch_q_vector = NULL; + struct ice_vsi *ch_vsi; + u16 num_qps, base_q; + int i; + + ch_vsi = ch->ch_vsi; + num_qps = ch_vsi->num_rxq; + + qps_per_poller = ch->qps_per_poller; + if (qps_per_poller <= 1) + continue; + + base_q = ch->base_q; + + for (i = 0; i < num_qps; i++) { + struct ice_ring *tx_ring, *rx_ring; + struct ice_q_vector *q_vector; + + tx_ring = vsi->tx_rings[base_q + i]; + rx_ring = vsi->rx_rings[base_q + i]; + if (!tx_ring || !tx_ring->q_vector || !rx_ring) + continue; + + if (!ch_q_vector || (i % qps_per_poller == 0)) { + ch_q_vector = tx_ring->q_vector; + continue; + } + + q_vector = tx_ring->q_vector; + q_vector->num_ring_tx = 0; + q_vector->num_ring_rx = 0; + q_vector->tx.ring = NULL; + q_vector->rx.ring = NULL; + + tx_ring->q_vector = ch_q_vector; + tx_ring->next = ch_q_vector->tx.ring; + ch_q_vector->tx.ring = tx_ring; + ch_q_vector->num_ring_tx++; + + rx_ring->q_vector = ch_q_vector; + rx_ring->next = ch_q_vector->rx.ring; + ch_q_vector->rx.ring = rx_ring; + ch_q_vector->num_ring_rx++; + } + } +} + +/** + * ice_ch_vsi_update_ring_vecs - update channel VSIs q_vectors to + * ring mappings. 
+ * @vsi: VSI to operate on + */ +void ice_ch_vsi_update_ring_vecs(struct ice_vsi *vsi) +{ + rtnl_lock(); + ice_dis_vsi(vsi, true); + ice_vsi_map_rings_to_vectors(vsi); + ice_ch_vsi_map_rings_to_vecs(vsi); + ice_ena_vsi(vsi, true); + rtnl_unlock(); +} /** * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP @@ -3447,9 +3740,9 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) .vsi_map_offset = vsi->alloc_txq, .mapping_mode = ICE_VSI_MAP_CONTIG }; - enum ice_status status; struct device *dev; int i, v_idx; + int status; dev = ice_pf_to_dev(pf); vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, @@ -3483,6 +3776,9 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) xdp_rings_rem -= xdp_rings_per_v; } + if (vsi->all_numtc) + ice_ch_vsi_map_xdp_rings_to_vecs(vsi); + /* omit the scheduler update if in reset path; XDP queues will be * taken into account at the end of ice_vsi_rebuild, where * ice_cfg_vsi_lan is being called @@ -3499,8 +3795,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (status) { - dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n", - ice_stat_str(status)); + dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n", + status); goto clear_xdp_rings; } ice_vsi_assign_bpf_prog(vsi, prog); @@ -3539,7 +3835,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi) int i, v_idx; /* q_vectors are freed in reset path so there's no point in detaching - * rings; in case of rebuild being triggered not from reset reset bits + * rings; in case of rebuild being triggered not from reset bits * in pf->state won't be set, so additionally check first q_vector * against NULL */ @@ -3646,11 +3942,9 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); -#ifdef HAVE_XDP_SUPPORT #ifdef ICE_ADD_PROBES ice_clear_xdp_stats(vsi); #endif /* ICE_ADD_PROBES */ -#endif /* HAVE_XDP_SUPPORT */ } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { xdp_ring_err = ice_destroy_xdp_rings(vsi); if (xdp_ring_err) @@ -3675,16 +3969,41 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, return (ret || xdp_ring_err) ? -ENOMEM : 0; } +/** + * ice_xdp_safe_mode - XDP handler for safe mode + * @dev: netdevice + * @xdp: XDP command + */ +static int ice_xdp_safe_mode(struct net_device __always_unused *dev, + struct netdev_bpf *xdp) +{ + NL_SET_ERR_MSG_MOD(xdp->extack, + "Please provide working DDP firmware package in order to use XDP. Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst"); + +#ifdef HAVE_XDP_QUERY_PROG + /* In safe mode, when running kernel that still has XDP_QUERY command + * implemented, driver has to return *success* even if XDP is not + * supported in safe mode. This is because with ndo_bpf implemented + * (to present proper extack message) driver can be occasionally + * called with XDP_QUERY action and would need to return correct values. 
+ */ + if (xdp->command == XDP_QUERY_PROG) { +#ifndef NO_NETDEV_BPF_PROG_ATTACHED + xdp->prog_attached = false; +#endif /* !NO_NETDEV_BPF_PROG_ATTACHED */ + xdp->prog_id = 0; + return 0; + } +#endif /* HAVE_XDP_QUERY_PROG */ + return -EOPNOTSUPP; +} + /** * ice_xdp - implements XDP handler * @dev: netdevice * @xdp: XDP command */ -#ifdef HAVE_NDO_BPF static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) -#else -static int ice_xdp(struct net_device *dev, struct netdev_xdp *xdp) -#endif { struct ice_netdev_priv *np = netdev_priv(dev); struct ice_vsi *vsi = np->vsi; @@ -3708,7 +4027,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_xdp *xdp) #ifdef HAVE_AF_XDP_ZC_SUPPORT case XDP_SETUP_XSK_POOL: #ifdef HAVE_NETDEV_BPF_XSK_POOL - return ice_xsk_umem_setup(vsi, xdp->xsk.pool, + return ice_xsk_pool_setup(vsi, xdp->xsk.pool, #else return ice_xsk_umem_setup(vsi, xdp->xsk.umem, #endif /* HAVE_NETDEV_BPF_XSK_POOL */ @@ -3855,7 +4174,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - set_bit(ICE_PTP_TX_TS_READY, pf->state); + if (!hw->reset_ongoing) + ret = IRQ_WAKE_THREAD; } if (oicr & PFINT_OICR_TSYN_EVNT_M) { @@ -3867,23 +4187,14 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) GLTSYN_STAT_EVENT1_M | GLTSYN_STAT_EVENT2_M); ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; - set_bit(ICE_PTP_EXT_TS_READY, pf->state); + kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work); } if (oicr & (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)) { - struct ice_event *event; - + pf->oicr_err_reg |= oicr; + set_bit(ICE_AUX_ERR_PENDING, pf->state); ena_mask &= ~PFINT_OICR_HMC_ERR_M; ena_mask &= ~PFINT_OICR_PE_CRITERR_M; ena_mask &= ~PFINT_OICR_PE_PUSH_M; - event = kzalloc(sizeof(*event), GFP_ATOMIC); - if (event) { - set_bit(ICE_EVENT_CRIT_ERR, event->type); - event->reporter = NULL; - /* report the entire OICR value to peer */ - event->info.reg = oicr; - ice_for_each_peer(pf, event, ice_peer_check_for_reg); - kfree(event); - } } /* Report any remaining unexpected interrupts */ @@ -3899,7 +4210,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) ice_service_task_schedule(pf); } } - ret = IRQ_HANDLED; + if (!ret) + ret = IRQ_HANDLED; ice_service_task_schedule(pf); ice_irq_dynamic_ena(hw, NULL, NULL); @@ -3907,6 +4219,27 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) return ret; } +/** + * ice_misc_intr_thread_fn - misc interrupt thread function + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) +{ + struct ice_pf *pf = (struct ice_pf *)data; + irqreturn_t ret = IRQ_HANDLED; + bool irq_handled; + + if (ice_is_reset_in_progress(pf->state)) + return ret; + + irq_handled = ice_ptp_process_ts(pf); + if (!irq_handled) + ret = IRQ_WAKE_THREAD; + + return ret; +} + /** * ice_dis_ctrlq_interrupts - disable control queue interrupts * @hw: pointer to HW structure @@ -3937,6 +4270,7 @@ static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) */ static void ice_free_irq_msix_misc(struct ice_pf *pf) { + int irq_num = ice_get_irq_num(pf, pf->oicr_idx); struct ice_hw *hw = &pf->hw; ice_dis_ctrlq_interrupts(hw); @@ -3945,11 +4279,8 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) wr32(hw, PFINT_OICR_ENA, 0); ice_flush(hw); - if (pf->msix_entries) { - synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); - 
devm_free_irq(ice_pf_to_dev(pf), - pf->msix_entries[pf->oicr_idx].vector, pf); - } + synchronize_irq(irq_num); + devm_free_irq(ice_pf_to_dev(pf), irq_num, pf); pf->num_avail_sw_msix += 1; ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); @@ -4019,11 +4350,12 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) pf->num_avail_sw_msix -= 1; pf->oicr_idx = (u16)oicr_idx; - err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, - ice_misc_intr, 0, pf->int_name, pf); + err = devm_request_threaded_irq(dev, ice_get_irq_num(pf, pf->oicr_idx), + ice_misc_intr, ice_misc_intr_thread_fn, + 0, pf->int_name, pf); if (err) { - dev_err(dev, "devm_request_irq for %s failed: %d\n", - pf->int_name, err); + ice_dev_err_errno(dev, err, "devm_request_threaded_irq for %s failed", + pf->int_name); ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); pf->num_avail_sw_msix += 1; return err; @@ -4076,6 +4408,9 @@ static void ice_set_ops(struct net_device *netdev) return; } netdev->netdev_ops = &ice_netdev_ops; +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic; +#endif ice_set_ethtool_ops(netdev); } @@ -4175,16 +4510,20 @@ static void ice_set_netdev_features(struct net_device *netdev) * type turns on the other has to be turned off. This is enforced by the * ice_fix_features() ndo callback. */ - if (is_dvm_ena) { + if (is_dvm_ena) netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; - } +#ifdef HAVE_NDO_DFWD_OPS -#ifdef HAVE_NETDEV_SB_DEV /* Enable macvlan offloads */ if (test_bit(ICE_FLAG_VMDQ_ENA, pf->flags)) netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ + + /* Leave CRC / FCS stripping enabled by default, but allow the value to + * be changed at runtime + */ + netdev->hw_features |= NETIF_F_RXFCS; } /** @@ -4195,21 +4534,27 @@ static void ice_set_netdev_features(struct net_device *netdev) */ static int ice_cfg_netdev(struct ice_vsi *vsi) { + int alloc_txq = vsi->alloc_txq; + int alloc_rxq = vsi->alloc_rxq; struct ice_netdev_priv *np; struct net_device *netdev; u8 mac_addr[ETH_ALEN]; -#ifdef HAVE_NETDEV_SB_DEV + if (vsi->type == ICE_VSI_PF) { + alloc_txq = vsi->back->max_adq_qps; + alloc_rxq = vsi->back->max_adq_qps; + } + +#ifdef HAVE_NDO_DFWD_OPS /* Inform Kernel beforehand about max number of MACVLAN queues * supported. 
*/ netdev = alloc_etherdev_mqs(sizeof(*np), - ICE_MAX_MACVLANS + vsi->alloc_txq, - ICE_MAX_MACVLANS + vsi->alloc_rxq); -#else /* !HAVE_NETDEV_SB_DEV */ - netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, - vsi->alloc_rxq); -#endif /* !HAVE_NETDEV_SB_DEV */ + ICE_MAX_MACVLANS + alloc_txq, + ICE_MAX_MACVLANS + alloc_rxq); +#else /* !HAVE_NDO_DFWD_OPS */ + netdev = alloc_etherdev_mqs(sizeof(*np), alloc_txq, alloc_rxq); +#endif /* !HAVE_NDO_DFWD_OPS */ if (!netdev) return -ENOMEM; @@ -4225,7 +4570,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) if (vsi->type == ICE_VSI_PF) { SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); - ether_addr_copy(netdev->dev_addr, mac_addr); + eth_hw_addr_set(netdev, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); } @@ -4275,7 +4620,7 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) static struct ice_vsi * ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL, 0); + return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL, 0); } #ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO @@ -4283,11 +4628,11 @@ static struct ice_vsi * ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, struct ice_channel *ch) { - return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch, 0); + return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch, 0); } #endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS /** * ice_macvlan_vsi_setup - Set up a MACVLAN VSI * @pf: board private structure @@ -4299,10 +4644,10 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, static struct ice_vsi * ice_macvlan_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_OFFLOAD_MACVLAN, ICE_INVAL_VFID, + return ice_vsi_setup(pf, pi, ICE_VSI_OFFLOAD_MACVLAN, NULL, NULL, 0); } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ /** * ice_ctrl_vsi_setup - Set up a control VSI @@ -4315,7 +4660,7 @@ ice_macvlan_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) static struct ice_vsi * ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL, 0); + return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL, 0); } /** @@ -4329,7 +4674,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi * ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL, 0); + return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL, 0); } /** @@ -4352,6 +4697,20 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) if (!vid) return 0; + while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) + usleep_range(1000, 2000); + + /* Add multicast promisc rule for the VLAN ID to be added if + * all-multicast is currently enabled. 
+ */ + if (vsi->current_netdev_flags & IFF_ALLMULTI) { + ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, + vid, vsi->port_info->lport); + if (ret) + goto finish; + } + vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged @@ -4359,8 +4718,25 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) */ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0, ICE_FWD_TO_VSI); ret = vlan_ops->add_vlan(vsi, &vlan); - if (!ret) - set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); + if (ret) + goto finish; + + /* If all-multicast is currently enabled and this VLAN ID is only one + * besides VLAN-0 we have to update look-up type of multicast promisc + * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN. + */ + if ((vsi->current_netdev_flags & IFF_ALLMULTI) && + ice_vsi_num_non_zero_vlans(vsi) == 1) { + ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_PROMISC_BITS, 0, + vsi->port_info->lport); + ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, 0, + vsi->port_info->lport); + } + +finish: + clear_bit(ICE_CFG_BUSY, vsi->state); return ret; } @@ -4393,10 +4769,35 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0, ICE_FWD_TO_VSI); ret = vlan_ops->del_vlan(vsi, &vlan); if (ret) - return ret; + goto finish; - set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); - return 0; + /* Remove multicast promisc rule for the removed VLAN ID if + * all-multicast is enabled. + */ + if (vsi->current_netdev_flags & IFF_ALLMULTI) + ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, vid, + vsi->port_info->lport); + + if (!ice_vsi_has_non_zero_vlans(vsi)) { + /* Update look-up type of multicast promisc rule for VLAN 0 + * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when + * all-multicast is enabled and VLAN 0 is the only VLAN rule. 
+ */ + if (vsi->current_netdev_flags & IFF_ALLMULTI) { + ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, + 0, vsi->port_info->lport); + ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_PROMISC_BITS, 0, + vsi->port_info->lport); + } + } + +finish: + clear_bit(ICE_CFG_BUSY, vsi->state); + + return ret; } /** @@ -4434,7 +4835,6 @@ static void ice_pf_reset_stats(struct ice_pf *pf) #endif } - #ifdef HAVE_TC_INDIR_BLOCK #ifdef HAVE_FLOW_BLOCK_API /** @@ -4510,6 +4910,26 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi) #endif /* HAVE_TC_INDIR_BLOCK */ +/** + * ice_setup_default_rules - Setup default ICE rules + * @vsi: VSI struct to set up the default rules for + * + * Returns 0 on success, negative value on failure + */ +static int ice_setup_default_rules(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct device *dev; + + dev = ice_pf_to_dev(pf); + + if (ice_init_mac_fltr(pf)) { + dev_err(dev, "error initializing MAC filter\n"); + return -EINVAL; + } + return 0; +} + /** * ice_setup_pf_sw - Setup the HW switch on startup or after reset * @pf: board private structure @@ -4518,7 +4938,6 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi) */ static int ice_setup_pf_sw(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); bool dvm = ice_is_dvm_ena(&pf->hw); struct ice_vsi *vsi; int status = 0; @@ -4526,7 +4945,6 @@ static int ice_setup_pf_sw(struct ice_pf *pf) if (ice_is_reset_in_progress(pf->state)) return -EBUSY; - status = ice_aq_set_port_params(pf->hw.port_info, 0, false, false, dvm, NULL); if (status) @@ -4540,10 +4958,8 @@ static int ice_setup_pf_sw(struct ice_pf *pf) INIT_LIST_HEAD(&vsi->ch_list); status = ice_cfg_netdev(vsi); - if (status) { - status = -ENODEV; + if (status) goto unroll_vsi_setup; - } /* netdev has to be configured before setting frame size */ ice_vsi_cfg_frame_size(vsi); @@ -4552,7 +4968,7 @@ static int ice_setup_pf_sw(struct ice_pf *pf) /* init indirect block notifications */ status = ice_tc_indir_block_register(vsi); if (status) { - dev_err(dev, "Failed to register netdev notifier\n"); + dev_err(ice_pf_to_dev(pf), "Failed to register netdev notifier\n"); goto unroll_cfg_netdev; } #endif /* HAVE_TC_INDIR_BLOCK */ @@ -4566,18 +4982,11 @@ static int ice_setup_pf_sw(struct ice_pf *pf) */ ice_napi_add(vsi); - status = ice_init_mac_fltr(pf); + status = ice_setup_default_rules(vsi); + if (status) goto unroll_napi_add; - status = ice_set_cpu_rx_rmap(vsi); - if (status) { - dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n", - vsi->vsi_num, status); - status = -EINVAL; - goto unroll_napi_add; - } - return status; unroll_napi_add: @@ -4644,9 +5053,12 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf) static void ice_deinit_pf(struct ice_pf *pf) { ice_service_task_stop(pf); + mutex_destroy(&pf->lag_mutex); + mutex_destroy(&pf->adev_mutex); mutex_destroy(&pf->sw_mutex); mutex_destroy(&pf->tc_mutex); mutex_destroy(&pf->avail_q_mutex); + mutex_destroy(&pf->vfs.table_lock); if (pf->avail_txqs) { bitmap_free(pf->avail_txqs); @@ -4659,6 +5071,25 @@ static void ice_deinit_pf(struct ice_pf *pf) } } +#define ICE_PF_PCI_BAR_0 0 +/** + * ice_set_siov_cap - enable S-IOV in PF flags + * @pf: PF struct + */ +static void ice_set_siov_cap(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + u64 size; + int pos; + + pos = pci_find_ext_capability(pf->pdev, PCI_EXT_CAP_ID_PASID); + size = pci_resource_len(pf->pdev, ICE_PF_PCI_BAR_0); + if (pos && size >= SZ_128M) { + 
set_bit(ICE_FLAG_SIOV_CAPABLE, pf->flags); + dev_dbg(dev, "Device supports Scalable IOV\n"); + } +} + /** * ice_set_pf_caps - set PFs capability flags * @pf: pointer to the PF instance @@ -4671,11 +5102,12 @@ static void ice_set_pf_caps(struct ice_pf *pf) if (func_caps->common_cap.vmdq) set_bit(ICE_FLAG_VMDQ_ENA, pf->flags); clear_bit(ICE_FLAG_IWARP_ENA, pf->flags); - clear_bit(ICE_FLAG_PEER_ENA, pf->flags); - if (func_caps->common_cap.iwarp && IS_ENABLED(CONFIG_MFD_CORE)) { + clear_bit(ICE_FLAG_AUX_ENA, pf->flags); + if (func_caps->common_cap.iwarp) { set_bit(ICE_FLAG_IWARP_ENA, pf->flags); - set_bit(ICE_FLAG_PEER_ENA, pf->flags); + set_bit(ICE_FLAG_AUX_ENA, pf->flags); } + set_bit(ICE_FLAG_AUX_ENA, pf->flags); clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); if (func_caps->common_cap.dcb) set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); @@ -4684,9 +5116,10 @@ static void ice_set_pf_caps(struct ice_pf *pf) if (func_caps->common_cap.sr_iov_1_1) { set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); set_bit(ICE_FLAG_ESWITCH_CAPABLE, pf->flags); - pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, - ICE_MAX_VF_COUNT); + pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, + ICE_MAX_SRIOV_VFS); } + ice_set_siov_cap(pf); clear_bit(ICE_FLAG_RSS_ENA, pf->flags); if (func_caps->common_cap.rss_table_size) set_bit(ICE_FLAG_RSS_ENA, pf->flags); @@ -4707,9 +5140,9 @@ static void ice_set_pf_caps(struct ice_pf *pf) ice_alloc_fd_shrd_item(&pf->hw, &unused, func_caps->fd_fltr_best_effort); } - clear_bit(ICE_FLAG_PTP_ENA, pf->flags); + clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); if (func_caps->common_cap.ieee_1588) - set_bit(ICE_FLAG_PTP_ENA, pf->flags); + set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); pf->max_pf_txqs = func_caps->common_cap.num_txq; pf->max_pf_rxqs = func_caps->common_cap.num_rxq; @@ -4725,6 +5158,8 @@ static int ice_init_pf(struct ice_pf *pf) mutex_init(&pf->sw_mutex); mutex_init(&pf->tc_mutex); + mutex_init(&pf->adev_mutex); + mutex_init(&pf->lag_mutex); INIT_HLIST_HEAD(&pf->aq_wait_list); spin_lock_init(&pf->aq_wait_lock); @@ -4754,218 +5189,8 @@ static int ice_init_pf(struct ice_pf *pf) spin_lock_init(&pf->tnl_lock); INIT_LIST_HEAD(&pf->tnl_list); - return 0; -} - -static int ice_alloc_msix_entries(struct ice_pf *pf, u16 num_entries) -{ - u16 i; - - pf->msix_entries = devm_kcalloc(ice_pf_to_dev(pf), num_entries, - sizeof(*pf->msix_entries), GFP_KERNEL); - if (!pf->msix_entries) - return -ENOMEM; - - for (i = 0; i < num_entries; i++) - pf->msix_entries[i].entry = i; - - return 0; -} - -static void ice_free_msix_entries(struct ice_pf *pf) -{ - devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); - pf->msix_entries = NULL; -} - -/** - * ice_ena_msix_range - request a range of MSI-X vectors from the OS - * @pf: board private structure - * - * The driver first tries to enable best-case scenario MSI-X vectors. If that - * doesn't succeeed then a fall-back method is employed. - * - * The fall-back logic is described below with each [#] being an attempt at - * enabling a certain number of MSI-X. If any of the steps succeed, then return - * the number of MSI-X enabled from pci_ena_msix_exact(). If any of the attempts - * fail, then goto the next step. - * - * Attempt [0]: Enable the best-case scenario MSI-X vectors. - * - * Attempt [1]: Enable MSI-X vectors with eswitch support disabled - * - * Attempt [2]: Enable MSI-X vectors with MACVLAN support disabled, which - * reduces the request by the MSI-X vectors needed for MACVLAN. 
- * - * Attempt [3]: Enable MSI-X vectors with the number of pf->num_lan_msix reduced - * by a factor of 2 from the previous attempts (i.e. num_online_cpus() / 2). - * Also, with the number of pf->num_rdma_msix reduced by a factor of ~2 from the - * previous attempts (i.e. num_online_cpus() / 2 + ICE_RDMA_NUM_AEQ_MSIX). - * - * Attempt [4]: Same as attempt [3], except reduce both by a factor of 4. - * - * Attempt [5]: Enable the bare-minimum MSI-X vectors. - * - * Also, if the adjusted_base_msix ever hits the mimimum required for LAN or - * RDMA, then just set the needed MSI-X for that feature to the minimum (similar - * to attempt [5]). - */ -static int ice_ena_msix_range(struct ice_pf *pf) -{ - int err = -ENOSPC, num_cpus, attempt, adjusted_msix_divisor = 1, needed; - struct device *dev = ice_pf_to_dev(pf); - - num_cpus = num_online_cpus(); - -#define ICE_MAX_ENABLE_MSIX_ATTEMPTS 5 - /* make multiple passes at enabling MSI-X vectors in case there aren't - * enough available for the best-case scenario - */ - for (attempt = 0; attempt <= ICE_MAX_ENABLE_MSIX_ATTEMPTS; attempt++) { - int adjusted_base_msix = num_cpus / adjusted_msix_divisor; - - /* attempt to enable minimum MSI-X range */ - if (attempt == ICE_MAX_ENABLE_MSIX_ATTEMPTS) { - needed = ICE_MIN_MSIX; - pf->num_lan_msix = ICE_MIN_LAN_MSIX; - - if (test_bit(ICE_FLAG_IWARP_ENA, pf->flags)) { - needed += ICE_MIN_RDMA_MSIX; - pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; - } - } else { - if (adjusted_base_msix > ICE_MIN_LAN_MSIX) - pf->num_lan_msix = adjusted_base_msix; - else - pf->num_lan_msix = ICE_MIN_LAN_MSIX; - - needed = pf->num_lan_msix + ICE_OICR_MSIX; - - if (attempt == 0 && - test_bit(ICE_FLAG_ESWITCH_CAPABLE, pf->flags)) { - needed += ICE_ESWITCH_MSIX; - } else if (attempt == 1) { - dev_warn(dev, "Not enough MSI-X for eswitch support, disabling feature\n"); - clear_bit(ICE_FLAG_ESWITCH_CAPABLE, pf->flags); - } -#ifdef HAVE_NETDEV_SB_DEV - - /* only reserve MACVLAN MSI-X on the first and second - * attempt - */ - if ((attempt == 0 || attempt == 1) && - test_bit(ICE_FLAG_VMDQ_ENA, pf->flags)) { - needed += ICE_MAX_MACVLANS * ICE_DFLT_VEC_VMDQ_VSI; - } else if (attempt == 2) { - dev_warn(dev, "Not enough MSI-X for hardware MACVLAN support, disabling feature.\n"); - clear_bit(ICE_FLAG_VMDQ_ENA, pf->flags); - } -#endif /* HAVE_NETDEV_SB_DEV */ - - /* reserve vectors for RDMA peer driver */ - if (test_bit(ICE_FLAG_IWARP_ENA, pf->flags)) { - if (adjusted_base_msix > ICE_MIN_RDMA_MSIX) - pf->num_rdma_msix = adjusted_base_msix + - ICE_RDMA_NUM_AEQ_MSIX; - else - pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; - - needed += pf->num_rdma_msix; - } - } - - if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) - needed += ICE_FDIR_MSIX; - - err = ice_alloc_msix_entries(pf, needed); - if (err) - goto err_out; - - dev_dbg(dev, "attempting to enable %d MSI-X vectors\n", needed); - err = pci_enable_msix_exact(pf->pdev, pf->msix_entries, needed); - if (err < 0) { - ice_free_msix_entries(pf); - dev_notice(dev, "Couldn't get %d MSI-X vectors due to OS, Platform, and/or PCI-function limitations. 
Reducing request and retrying.", - needed); - - /* MACVLAN support already disabled and we still failed - * to enable MSI-X, so make another attempt at enabling - * MSI-X by reducing the needed amount - */ - if (attempt > 1) - adjusted_msix_divisor *= 2; - } else { - if (pf->num_lan_msix != num_cpus) - dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", - pf->num_lan_msix); - - if (test_bit(ICE_FLAG_IWARP_ENA, pf->flags) && - pf->num_rdma_msix != (num_cpus + ICE_RDMA_NUM_AEQ_MSIX)) - dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", - pf->num_rdma_msix); - - return needed; - } - } - -err_out: - dev_err(dev, "failed to enable MSI-X vectors\n"); - pf->num_lan_msix = 0; - pf->num_rdma_msix = 0; - return err; -} - -/** - * ice_dis_msix - Disable MSI-X interrupt setup in OS - * @pf: board private structure - */ -static void ice_dis_msix(struct ice_pf *pf) -{ - pci_disable_msix(pf->pdev); - ice_free_msix_entries(pf); -} - -/** - * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme - * @pf: board private structure - */ -static void ice_clear_interrupt_scheme(struct ice_pf *pf) -{ - ice_dis_msix(pf); - - if (pf->irq_tracker) { - devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); - pf->irq_tracker = NULL; - } -} - -/** - * ice_init_interrupt_scheme - Determine proper interrupt scheme - * @pf: board private structure to initialize - */ -static int ice_init_interrupt_scheme(struct ice_pf *pf) -{ - int vectors; - - vectors = ice_ena_msix_range(pf); - - if (vectors < 0) - return vectors; - - /* set up vector assignment tracking */ - pf->irq_tracker = - devm_kzalloc(ice_pf_to_dev(pf), - struct_size(pf->irq_tracker, list, vectors), - GFP_KERNEL); - if (!pf->irq_tracker) { - ice_dis_msix(pf); - return -ENOMEM; - } - - /* populate SW interrupts pool with number of OS granted IRQs. */ - pf->num_avail_sw_msix = (u16)vectors; - pf->irq_tracker->num_entries = (u16)vectors; - pf->irq_tracker->end = pf->irq_tracker->num_entries; + mutex_init(&pf->vfs.table_lock); + hash_init(pf->vfs.table); return 0; } @@ -5047,8 +5272,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) { struct ice_vsi *vsi = ice_get_main_vsi(pf); struct ice_vsi_ctx *ctxt; - enum ice_status status; struct ice_hw *hw; + int status; if (!vsi) return; @@ -5078,9 +5303,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); } else { vsi->info.sec_flags = ctxt->info.sec_flags; vsi->info.sw_flags2 = ctxt->info.sw_flags2; @@ -5093,109 +5317,84 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) /** * ice_log_pkg_init - log result of DDP package load * @hw: pointer to hardware info - * @status: status of package load + * @state: state of package load */ -static void -ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) +static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) { struct ice_pf *pf = hw->back; struct device *dev; dev = ice_pf_to_dev(pf); - switch (*status) { - case ICE_SUCCESS: - /* The package download AdminQ command returned success because - * this download succeeded or ICE_ERR_AQ_NO_WORK since there is - * already a package loaded on the device. 
- */ - if (hw->pkg_ver.major == hw->active_pkg_ver.major && - hw->pkg_ver.minor == hw->active_pkg_ver.minor && - hw->pkg_ver.update == hw->active_pkg_ver.update && - hw->pkg_ver.draft == hw->active_pkg_ver.draft && - !memcmp(hw->pkg_name, hw->active_pkg_name, - sizeof(hw->pkg_name))) { - if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) - dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - hw->active_pkg_ver.update, - hw->active_pkg_ver.draft); - else - dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - hw->active_pkg_ver.update, - hw->active_pkg_ver.draft); - } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || - hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { - dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); - *status = ICE_ERR_NOT_SUPPORTED; - } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { - dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - hw->active_pkg_ver.update, - hw->active_pkg_ver.draft, - hw->pkg_name, - hw->pkg_ver.major, - hw->pkg_ver.minor, - hw->pkg_ver.update, - hw->pkg_ver.draft); - } else { - dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); - *status = ICE_ERR_NOT_SUPPORTED; - } + + switch (state) { + case ICE_DDP_PKG_SUCCESS: + dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); break; - case ICE_ERR_FW_DDP_MISMATCH: + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); + break; + case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: + dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + break; + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. 
The package file found by the driver: '%s' version %d.%d.%d.%d.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft, + hw->pkg_name, + hw->pkg_ver.major, + hw->pkg_ver.minor, + hw->pkg_ver.update, + hw->pkg_ver.draft); + break; + case ICE_DDP_PKG_FW_MISMATCH: dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); break; - case ICE_ERR_BUF_TOO_SHORT: - case ICE_ERR_CFG: + case ICE_DDP_PKG_INVALID_FILE: dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); break; - case ICE_ERR_NOT_SUPPORTED: - /* Package File version not supported */ - if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || - (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) - dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); - else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || - (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) - dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", - ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: + dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); break; - case ICE_ERR_AQ_ERROR: - switch (hw->pkg_dwnld_status) { - case ICE_AQ_RC_ENOSEC: - case ICE_AQ_RC_EBADSIG: - dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); - return; - case ICE_AQ_RC_ESVN: - dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); - return; - case ICE_AQ_RC_EBADMAN: - case ICE_AQ_RC_EBADBUF: - dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); - /* poll for reset to complete */ - if (ice_check_reset(hw)) - dev_err(dev, "Error resetting device. Please reload the driver\n"); - return; - default: - break; - } - /* fall-through */ + case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: + dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + break; + case ICE_DDP_PKG_NO_SEC_MANIFEST: + dev_err(dev, "The DDP package could not be loaded because its security manifest is missing. Please use a valid DDP Package. Entering Safe Mode.\n"); + break; + case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: + dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); + break; + case ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW: + dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); + break; + case ICE_DDP_PKG_MANIFEST_INVALID: + case ICE_DDP_PKG_BUFFER_INVALID: + dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); + /* poll for reset to complete */ + if (ice_check_reset(hw)) + dev_err(dev, "Error resetting device. 
Please reload the driver\n"); + break; + case ICE_DDP_PKG_ERR: default: - dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", - *status); + dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n"); break; } } @@ -5211,24 +5410,24 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) static void ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) { - enum ice_status status = ICE_ERR_PARAM; + enum ice_ddp_state state = ICE_DDP_PKG_ERR; struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; /* Load DDP Package */ if (firmware && !hw->pkg_copy) { - status = ice_copy_and_init_pkg(hw, firmware->data, - firmware->size); - ice_log_pkg_init(hw, &status); + state = ice_copy_and_init_pkg(hw, firmware->data, + firmware->size); + ice_log_pkg_init(hw, state); } else if (!firmware && hw->pkg_copy) { /* Reload package during rebuild after CORER/GLOBR reset */ - status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); - ice_log_pkg_init(hw, &status); + state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); + ice_log_pkg_init(hw, state); } else { dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); } - if (status) { + if (!ice_is_init_pkg_successful(state)) { /* Safe Mode */ clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); return; @@ -5280,7 +5479,19 @@ static int ice_prepare_for_safe_mode(struct ice_pf *pf) if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) ice_free_vfs(pf); -#ifdef HAVE_NETDEV_SB_DEV + if (test_bit(ICE_FLAG_SIOV_ENA, pf->flags)) { + u32 reg; + + ice_vdcm_deinit(pf->pdev); + + /* disable PASID mailbox */ + reg = rd32(&pf->hw, GL_MBX_PASID); + reg &= ~GL_MBX_PASID_PASID_MODE_M; + wr32(&pf->hw, GL_MBX_PASID, reg); + clear_bit(ICE_FLAG_SIOV_ENA, pf->flags); + } + +#ifdef HAVE_NDO_DFWD_OPS if (test_bit(ICE_FLAG_MACVLAN_ENA, pf->flags)) { int v; @@ -5291,7 +5502,7 @@ static int ice_prepare_for_safe_mode(struct ice_pf *pf) ice_deinit_macvlan(vsi); } } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ ice_set_safe_mode_vlan_cfg(pf); @@ -5308,15 +5519,14 @@ static int ice_prepare_for_safe_mode(struct ice_pf *pf) ice_set_pf_caps(pf); err = ice_cfg_netdev(pf_vsi); if (err) { - dev_err(ice_pf_to_dev(pf), "could not allocate netdev, err %d\n", - err); + ice_dev_err_errno(ice_pf_to_dev(pf), err, + "could not allocate netdev"); return err; } return 0; } - /** * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines * @pf: pointer to the PF structure @@ -5336,9 +5546,9 @@ static void ice_verify_cacheline_size(struct ice_pf *pf) * ice_send_version - update firmware with driver version * @pf: PF struct * - * Returns ICE_SUCCESS on success, else error code + * Returns 0 on success, else error code */ -static enum ice_status ice_send_version(struct ice_pf *pf) +static int ice_send_version(struct ice_pf *pf) { struct ice_driver_ver dv; @@ -5361,18 +5571,19 @@ int ice_init_acl(struct ice_pf *pf) { struct ice_acl_tbl_params params; struct ice_hw *hw = &pf->hw; - enum ice_status status; int divider; u16 scen_id; + int status; /* Creates a single ACL table that consist of src_ip(4 byte), * dest_ip(4 byte), src_port(2 byte) and dst_port(2 byte) for a total * of 12 bytes (96 bits), hence 120 bit wide keys, i.e. 3 TCAM slices. - * If the given hardware card contains less than 8 PFs (ports) then - * each PF will have its own TCAM slices. For 8 PFs, a given slice will - * be shared by 2 different PFs. 
+ * If the given hardware card contains less than + * ICE_TCAM_DIVIDER_THRESHOLD PFs (ports) then each PF will have its + * own TCAM slices. For ICE_TCAM_DIVIDER_THRESHOLD PFs, a given slice + * will be shared by 2 different PFs. */ - if (hw->dev_caps.num_funcs < 8) + if (hw->dev_caps.num_funcs < ICE_TCAM_DIVIDER_THRESHOLD) divider = ICE_ACL_ENTIRE_SLICE; else divider = ICE_ACL_HALF_SLICE; @@ -5383,13 +5594,11 @@ int ice_init_acl(struct ice_pf *pf) params.entry_act_pairs = 1; params.concurr = false; - status = ice_acl_create_tbl(hw, ¶ms); if (status) - return ice_status_to_errno(status); + return status; - return ice_status_to_errno(ice_acl_create_scen(hw, params.width, - params.depth, &scen_id)); + return ice_acl_create_scen(hw, params.width, params.depth, &scen_id); } /** @@ -5483,11 +5692,11 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf) /** * ice_request_fw - Device initialization routine * @pf: pointer to the PF instance + * @firmware: double pointer to firmware struct */ -static void ice_request_fw(struct ice_pf *pf) +static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware) { char *opt_fw_filename = ice_get_opt_fw_name(pf); - const struct firmware *firmware = NULL; struct device *dev = ice_pf_to_dev(pf); int err = 0; @@ -5496,29 +5705,16 @@ static void ice_request_fw(struct ice_pf *pf) * and warning messages for other errors. */ if (opt_fw_filename) { - err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); - if (err) { - kfree(opt_fw_filename); - goto dflt_pkg_load; - } - - /* request for firmware was successful. Download to device */ - ice_load_pkg(firmware, pf); + err = firmware_request_nowarn(firmware, opt_fw_filename, dev); kfree(opt_fw_filename); - release_firmware(firmware); - return; + if (!err) + return err; } - -dflt_pkg_load: - err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); - if (err) { + err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev); + if (err) dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); - return; - } - /* request for firmware was successful. Download to device */ - ice_load_pkg(firmware, pf); - release_firmware(firmware); + return err; } /** @@ -5574,8 +5770,8 @@ static void ice_print_wake_reason(struct ice_pf *pf) static void ice_config_health_events(struct ice_pf *pf, bool enable) { - enum ice_status ret; u8 enable_bits = 0; + int ret; if (!ice_is_fw_health_report_supported(&pf->hw)) return; @@ -5587,13 +5783,12 @@ ice_config_health_events(struct ice_pf *pf, bool enable) ret = ice_aq_set_health_status_config(&pf->hw, enable_bits, NULL); if (ret) - dev_err(ice_pf_to_dev(pf), "Failed to %sable firmware health events, err %s aq_err %s\n", - enable ? "en" : "dis", - ice_stat_str(ret), + dev_err(ice_pf_to_dev(pf), "Failed to %sable firmware health events, err %d aq_err %s\n", + enable ? 
"en" : "dis", ret, ice_aq_str(pf->hw.adminq.sq_last_status)); } -/* +/** * ice_register_netdev - register netdev and devlink port * @pf: pointer to the PF struct */ @@ -5633,7 +5828,6 @@ err_register_netdev: return err; } - /** * ice_pf_fwlog_is_input_valid - validate user input level/events * @pf: pointer to the PF struct @@ -5658,11 +5852,9 @@ ice_pf_fwlog_is_input_valid(struct ice_pf *pf, return false; } - return true; } - /** * ice_pf_fwlog_populate_cfg - populate FW log configuration * @cfg: configuration to populate @@ -5706,7 +5898,7 @@ static int ice_pf_fwlog_set(struct ice_pf *pf, struct ice_fwlog_user_input *user_input) { struct ice_fwlog_cfg cfg = {}; - enum ice_status status; + int status; if (!ice_pf_fwlog_is_input_valid(pf, user_input)) return -EINVAL; @@ -5717,7 +5909,7 @@ ice_pf_fwlog_set(struct ice_pf *pf, struct ice_fwlog_user_input *user_input) if (status) { dev_err(ice_pf_to_dev(pf), "Failed to set FW log configuration. fwlog_events: 0x%lx fwlog_level: %u\n", user_input->events, user_input->log_level); - return ice_status_to_errno(status); + return status; } return 0; @@ -5736,7 +5928,7 @@ static int ice_pf_fwlog_init(struct ice_pf *pf, struct ice_fwlog_user_input *user_input) { struct ice_fwlog_cfg cfg = {}; - enum ice_status status; + int status; if (!ice_pf_fwlog_is_input_valid(pf, user_input)) return -EINVAL; @@ -5749,12 +5941,90 @@ ice_pf_fwlog_init(struct ice_pf *pf, struct ice_fwlog_user_input *user_input) if (status) { dev_err(ice_pf_to_dev(pf), "Failed to init FW log configuration. fwlog_events: 0x%lx fwlog_level: %u\n", user_input->events, user_input->log_level); - return ice_status_to_errno(status); + return status; } return 0; } +/** + * ice_init_tx_topology - performs Tx topology initialization + * @hw: pointer to the hardware structure + * @firmware: pointer to firmware structure + */ +static int +ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware) +{ + u8 num_tx_sched_layers = hw->num_tx_sched_layers; + struct ice_pf *pf = hw->back; + struct device *dev; + u8 *buf_copy; + int err; + + dev = ice_pf_to_dev(pf); + /* ice_cfg_tx_topo buf argument is not a constant, + * so we have to make a copy + */ + buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL); + + err = ice_cfg_tx_topo(hw, buf_copy, firmware->size); + if (!err) { + if (hw->num_tx_sched_layers > num_tx_sched_layers) + dev_info(dev, "Transmit balancing feature disabled\n"); + else + dev_info(dev, "Transmit balancing feature enabled\n"); + /* if there was a change in topology ice_cfg_tx_topo triggered + * a CORER and we need to re-init hw + */ + ice_deinit_hw(hw); + err = ice_init_hw(hw); + if (err) { + kfree(buf_copy); + return -EIO; + } + } else if (err == -EIO) { + dev_info(dev, "DDP package does not support transmit balancing feature - please update to the latest DDP package and try again\n"); + } + + kfree(buf_copy); + + /* This feature is opt-in so we return 0 */ + return 0; +} + +/** + * ice_init_ddp_config - DDP related configuration + * @hw: pointer to the hardware structure + * @pf: pointer to pf structure + * + * This function loads DDP file from the disk, then initializes Tx + * topology. At the end DDP package is loaded on the card. 
+ */ +static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + const struct firmware *firmware = NULL; + int err; + + err = ice_request_fw(pf, &firmware); + if (err) + return err; + + err = ice_init_tx_topology(hw, firmware); + if (err) { + dev_err(dev, "ice_init_hw during change of tx topology failed: %d\n", + err); + release_firmware(firmware); + return err; + } + + /* Download firmware to device */ + ice_load_pkg(firmware, pf); + release_firmware(firmware); + + return err; +} + /** * ice_probe - Device initialization routine * @pdev: PCI device information struct @@ -5771,28 +6041,27 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) struct ice_hw *hw; int err; - /* this driver uses devres, see - * Documentation/driver-api/driver-model/devres.rst - */ - err = pcim_enable_device(pdev); + if (pdev->is_virtfn) { + dev_err(dev, "can't probe a virtual function\n"); + return -EINVAL; + } + + err = pci_enable_device(pdev); if (err) return err; - err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); + err = pci_request_mem_regions(pdev, dev_driver_string(dev)); if (err) { - dev_err(dev, "BAR0 I/O map error %d\n", err); + dev_err(dev, "pci_request_mem_regions failed %d\n", err); return err; } - pf = ice_allocate_pf(dev); if (!pf) return -ENOMEM; /* set up for high or low DMA */ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); - if (err) - err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (err) { dev_err(dev, "DMA configuration failed: 0x%x\n", err); return err; @@ -5808,7 +6077,18 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) set_bit(ICE_SERVICE_DIS, pf->state); hw = &pf->hw; - hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; + + /* Map only upto RDMA doorbell offset in the BAR. + * This will all be UC. 
+ */ + hw->hw_addr = (u8 __iomem *)ioremap(pci_resource_start(pdev, 0), + ICE_BAR_RDMA_DOORBELL_OFFSET); + if (!hw->hw_addr) { + err = -EIO; + dev_err(dev, "ioremap failed\n"); + goto err_init_iomap_fail; + } + pci_save_state(pdev); hw->back = pf; @@ -5823,11 +6103,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); - err = ice_devlink_register(pf); - if (err) { - dev_err(dev, "ice_devlink_register failed: %d\n", err); - goto err_exit_unroll; - } +#ifndef HAVE_DEVLINK_NOTIFY_REGISTER + ice_devlink_register(pf); +#endif /* !HAVE_DEVLINK_NOTIFY_REGISTER */ #ifndef CONFIG_DYNAMIC_DEBUG if (debug < -1) @@ -5843,12 +6121,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) return 0; } - ice_debugfs_pf_init(pf); - user_input.log_level = fwlog_level; user_input.events = fwlog_events; if (ice_pf_fwlog_init(pf, &user_input)) { - dev_err(dev, "failed to initialize FW logging: %d\n", err); + ice_dev_err_errno(dev, err, "failed to initialize FW logging"); err = -EIO; goto err_exit_unroll; } @@ -5862,9 +6138,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ice_init_feature_support(pf); - ice_request_fw(pf); + ice_debugfs_pf_init(pf); - /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be + ice_init_ddp_config(hw, pf); + + /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be * set in pf->state, which will cause ice_is_safe_mode to return * true */ @@ -5875,14 +6153,16 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) * device/function capabilities, override them. */ ice_set_safe_mode_caps(hw); + pf->max_qps = 1; + } else { + pf->max_qps = num_online_cpus(); } ice_set_umac_shared(hw); - err = ice_init_pf(pf); if (err) { - dev_err(dev, "ice_init_pf failed: %d\n", err); + ice_dev_err_errno(dev, err, "ice_init_pf failed"); goto err_init_pf_unroll; } ice_verify_eeprom(pf); @@ -5906,7 +6186,13 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) #endif /* ETHTOOL_GFECPARAM */ ice_devlink_init_regions(pf); - ice_devlink_params_publish(pf); +#ifdef HAVE_DEVLINK_HEALTH + ice_devlink_init_mdd_reporter(pf); +#endif /* HAVE_DEVLINK_HEALTH */ + +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + ice_udp_tunnel_prepare(pf); +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; if (!pf->num_alloc_vsi) { @@ -5923,7 +6209,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_init_interrupt_scheme(pf); if (err) { - dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); + ice_dev_err_errno(dev, err, + "ice_init_interrupt_scheme failed"); err = -EIO; goto err_init_vsi_unroll; } @@ -5935,11 +6222,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) */ err = ice_req_irq_msix_misc(pf); if (err) { - dev_err(dev, "setup of misc vector failed: %d\n", err); + ice_dev_err_errno(dev, err, "setup of misc vector failed"); goto err_init_interrupt_unroll; } - /* create switch struct for the switch element created by FW on boot */ pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); if (!pf->first_sw) { @@ -5959,7 +6245,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_setup_pf_sw(pf); if (err) { - dev_err(dev, "probe failed due to setup PF switch: %d\n", err); + ice_dev_err_errno(dev, err, + "probe failed due to setup PF 
switch"); goto err_alloc_sw_unroll; } @@ -5981,14 +6268,14 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_init_link_events(pf->hw.port_info); if (err) { - dev_err(dev, "ice_init_link_events failed: %d\n", err); + ice_dev_err_errno(dev, err, "ice_init_link_events failed"); goto err_send_version_unroll; } /* not a fatal error if this fails */ err = ice_init_nvm_phy_type(pf->hw.port_info); if (err) - dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); + ice_dev_err_errno(dev, err, "ice_init_nvm_phy_type failed"); ice_init_link_dflt_override(pf->hw.port_info); @@ -5997,7 +6284,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) if (err) dev_err(dev, "ice_update_link_info failed: %d\n", err); - ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err); + ice_check_link_cfg_err(pf, + pf->hw.port_info->phy.link_info.link_cfg_err); /* if media available, initialize PHY settings */ if (pf->hw.port_info->phy.link_info.link_info & @@ -6005,7 +6293,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) /* not a fatal error if this fails */ err = ice_init_phy_user_cfg(pf->hw.port_info); if (err) - dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); + ice_dev_err_errno(dev, err, + "ice_init_phy_user_cfg failed"); if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { struct ice_vsi *vsi = ice_get_main_vsi(pf); @@ -6031,35 +6320,18 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) /* Disable WoL at init, wait for user to enable */ device_set_wakeup_enable(dev, false); - /* init peers only if supported */ - if (ice_is_peer_ena(pf)) { - pf->peers = devm_kcalloc(dev, ICE_MAX_NUM_PEERS, - sizeof(*pf->peers), GFP_KERNEL); - if (!pf->peers) { - err = -ENOMEM; - goto err_init_peer_unroll; - } - - err = ice_init_peer_devices(pf); - if (err) { - dev_err(dev, "Failed to initialize peer_objs: 0x%x\n", - err); - err = -EIO; - goto err_init_peer_unroll; - } - } else { - dev_warn(dev, "RDMA is not supported on this device\n"); - } - if (ice_is_safe_mode(pf)) { ice_set_safe_mode_vlan_cfg(pf); goto probe_done; } /* initialize DDP driven features */ - if (test_bit(ICE_FLAG_PTP_ENA, pf->flags)) + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) ice_ptp_init(pf); + if (ice_is_feature_supported(pf, ICE_F_GNSS)) + ice_gnss_init(pf); + /* Note: Flow director init failure is non-fatal to load */ if (ice_init_fdir(pf)) dev_err(dev, "could not initialize flow director\n"); @@ -6070,8 +6342,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) /* Note: ACL init failure is non-fatal to load */ err = ice_init_acl(pf); if (err) - dev_err(&pf->pdev->dev, - "Failed to initialize ACL: %d\n", err); + ice_dev_err_errno(&pf->pdev->dev, err, + "Failed to initialize ACL"); } /* set DCF UDP tunnel enable flag as false by default */ @@ -6097,50 +6369,90 @@ probe_done: if (err) goto err_netdev_reg; + err = ice_devlink_register_params(pf); + if (err) + goto err_netdev_reg; + ice_config_health_events(pf, true); /* ready to go, so clear down state bit */ clear_bit(ICE_DOWN, pf->state); + /* init peers only if supported */ + if (ice_is_aux_ena(pf)) { + pf->cdev_infos = devm_kcalloc(dev, IIDC_MAX_NUM_AUX, + sizeof(*pf->cdev_infos), + GFP_KERNEL); + if (!pf->cdev_infos) { + err = -ENOMEM; + goto err_devlink_reg_param; + } + err = ice_init_aux_devices(pf); + if (err) { + dev_err(dev, "Failed to initialize aux devs: %d\n", + err); + err = -EIO; + 
goto err_init_aux_unroll; + } + } else { + dev_warn(dev, "Aux drivers are not supported on this device\n"); + } + + if (test_bit(ICE_FLAG_SIOV_CAPABLE, pf->flags)) + ice_initialize_siov_res(pf); + +#ifdef HAVE_DEVLINK_NOTIFY_REGISTER + ice_devlink_register(pf); +#endif /* HAVE_DEVLINK_NOTIFY_REGISTER */ return 0; /* Unwind non-managed device resources, etc. if something failed */ -err_netdev_reg: -err_init_peer_unroll: - if (ice_is_peer_ena(pf)) { - ice_for_each_peer(pf, NULL, ice_unroll_peer); - if (pf->peers) { - devm_kfree(dev, pf->peers); - pf->peers = NULL; - } +err_init_aux_unroll: + if (ice_is_aux_ena(pf)) { + ice_for_each_aux(pf, NULL, ice_unroll_cdev_info); + pf->cdev_infos = NULL; } +err_devlink_reg_param: + ice_devlink_unregister_params(pf); +err_netdev_reg: + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) + ice_ptp_release(pf); err_send_version_unroll: ice_vsi_release_all(pf); err_alloc_sw_unroll: set_bit(ICE_SERVICE_DIS, pf->state); set_bit(ICE_DOWN, pf->state); devm_kfree(dev, pf->first_sw); + pf->first_sw = NULL; err_msix_misc_unroll: ice_free_irq_msix_misc(pf); err_init_interrupt_unroll: ice_clear_interrupt_scheme(pf); err_init_vsi_unroll: devm_kfree(dev, pf->vsi); + pf->vsi = NULL; err_init_pf_unroll: ice_deinit_pf(pf); - ice_devlink_params_unpublish(pf); + iounmap(hw->hw_addr); +err_init_iomap_fail: +#ifdef HAVE_DEVLINK_HEALTH + ice_devlink_destroy_mdd_reporter(pf); +#endif /* HAVE_DEVLINK_HEALTH */ ice_devlink_destroy_regions(pf); ice_deinit_hw(hw); + hw->hw_addr = NULL; err_exit_unroll: ice_debugfs_pf_exit(pf); err_rec_mode: +#ifndef HAVE_DEVLINK_NOTIFY_REGISTER ice_devlink_unregister(pf); +#endif /* !HAVE_DEVLINK_NOTIFY_REGISTER */ pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); pci_disable_device(pdev); return err; } - /** * ice_set_wake - enable or disable Wake on LAN * @pf: pointer to the PF struct @@ -6174,9 +6486,9 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 mac_addr[ETH_ALEN]; struct ice_vsi *vsi; + int status; u8 flags; if (!pf->wol_ena) @@ -6198,9 +6510,8 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf) status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); if (status) - dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); } /** @@ -6210,11 +6521,14 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf) static void ice_remove(struct pci_dev *pdev) { struct ice_pf *pf = pci_get_drvdata(pdev); + struct device *dev = &pf->pdev->dev; + struct ice_hw *hw; int i; if (!pf) return; + hw = &pf->hw; /* ICE_PREPPED_RECOVERY_MODE is set when the up and running * driver transitions to recovery mode. If this is not set * it means that the driver went into recovery mode on load. 
@@ -6228,6 +6542,10 @@ static void ice_remove(struct pci_dev *pdev) return; } +#ifdef HAVE_DEVLINK_NOTIFY_REGISTER + ice_devlink_unregister(pf); +#endif /* HAVE_DEVLINK_NOTIFY_REGISTER */ + for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { if (!ice_is_reset_in_progress(pf->state)) break; @@ -6240,43 +6558,45 @@ static void ice_remove(struct pci_dev *pdev) ice_tc_indir_block_remove(pf); #endif /* HAVE_TC_INDIR_BLOCK */ - if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { set_bit(ICE_VF_RESETS_DISABLED, pf->state); ice_free_vfs(pf); } + if (test_bit(ICE_FLAG_SIOV_ENA, pf->flags)) { + u32 val; + + ice_vdcm_deinit(pf->pdev); + + /* disable PASID mailbox */ + val = rd32(hw, GL_MBX_PASID); + val &= ~GL_MBX_PASID_PASID_MODE_M; + wr32(hw, GL_MBX_PASID, val); + clear_bit(ICE_FLAG_SIOV_ENA, pf->flags); + } + ice_unplug_aux_devs(pf); + ice_for_each_aux(pf, NULL, ice_unroll_cdev_info); + devm_kfree(dev, pf->cdev_infos); ice_service_task_stop(pf); ice_aq_cancel_waiting_tasks(pf); - - if (ice_is_peer_ena(pf)) { - enum ice_close_reason reason; - - reason = ICE_REASON_INTERFACE_DOWN; - ice_for_each_peer(pf, &reason, ice_peer_close); - } + ice_devlink_unregister_params(pf); set_bit(ICE_DOWN, pf->state); if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) ice_deinit_acl(pf); - mutex_destroy(&(&pf->hw)->fdir_fltr_lock); + mutex_destroy(&hw->fdir_fltr_lock); #ifdef HAVE_NETDEV_UPPER_INFO ice_deinit_lag(pf); #endif /* HAVE_NETDEV_UPPER_INFO */ - if (test_bit(ICE_FLAG_PTP_ENA, pf->flags)) + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) ice_ptp_release(pf); + if (ice_is_feature_supported(pf, ICE_F_GNSS)) + ice_gnss_exit(pf); if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); ice_setup_mc_magic_wake(pf); ice_vsi_release_all(pf); - if (ice_is_peer_ena(pf)) { -#if IS_ENABLED(CONFIG_MFD_CORE) - ida_simple_remove(&ice_peer_index_ida, pf->peer_idx); -#endif - ice_for_each_peer(pf, NULL, ice_unreg_peer_obj); - devm_kfree(&pdev->dev, pf->peers); - } ice_set_wake(pf); ice_free_irq_msix_misc(pf); ice_for_each_vsi(pf, i) { @@ -6286,21 +6606,27 @@ static void ice_remove(struct pci_dev *pdev) } ice_deinit_pf(pf); - ice_devlink_params_unpublish(pf); +#ifdef HAVE_DEVLINK_HEALTH + ice_devlink_destroy_mdd_reporter(pf); +#endif /* HAVE_DEVLINK_HEALTH */ ice_devlink_destroy_regions(pf); - if (ice_fwlog_unregister(&pf->hw)) + if (ice_fwlog_unregister(hw)) dev_dbg(&pdev->dev, "failed to unregister from FW logging\n"); - ice_deinit_hw(&pf->hw); + ice_deinit_hw(hw); +#ifndef HAVE_DEVLINK_NOTIFY_REGISTER ice_devlink_unregister(pf); +#endif /* !HAVE_DEVLINK_NOTIFY_REGISTER */ ice_debugfs_pf_exit(pf); /* Issue a PFR as part of the prescribed driver unload flow. Do not * do it via ice_schedule_reset() since there is no need to rebuild * and the service task is already stopped. 
*/ - ice_reset(&pf->hw, ICE_RESET_PFR); + ice_reset(hw, ICE_RESET_PFR); + iounmap(hw->hw_addr); pci_wait_for_pending_transaction(pdev); ice_clear_interrupt_scheme(pf); pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); pci_disable_device(pdev); } @@ -6335,7 +6661,6 @@ static void ice_prepare_for_shutdown(struct ice_pf *pf) /* Notify VFs of impending reset */ if (ice_check_sq_alive(hw, &hw->mailboxq)) ice_vc_notify_reset(pf); - dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); /* disable the VSIs and their queues that are not already DOWN */ @@ -6345,7 +6670,7 @@ static void ice_prepare_for_shutdown(struct ice_pf *pf) if (pf->vsi[v]) pf->vsi[v]->vsi_num = 0; - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, true); } /** @@ -6369,7 +6694,8 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf) ret = ice_init_interrupt_scheme(pf); if (ret) { - dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); + ice_dev_err_errno(dev, ret, + "Failed to re-initialize interrupt"); return ret; } @@ -6386,8 +6712,8 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf) ret = ice_req_irq_msix_misc(pf); if (ret) { - dev_err(dev, "Setting up misc vector failed after device suspend %d\n", - ret); + ice_dev_err_errno(dev, ret, + "Setting up misc vector failed after device suspend"); goto err_reinit; } @@ -6429,12 +6755,7 @@ static int __maybe_unused ice_suspend(struct device *dev) */ disabled = ice_service_task_stop(pf); - if (ice_is_peer_ena(pf)) { - enum ice_close_reason reason; - - reason = ICE_REASON_INTERFACE_DOWN; - ice_for_each_peer(pf, &reason, ice_peer_close); - } + ice_unplug_aux_devs(pf); /* Already suspended?, then there is nothing to do */ if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { @@ -6512,9 +6833,9 @@ static int __maybe_unused ice_resume(struct device *dev) */ ret = ice_reinit_interrupt_scheme(pf); if (ret) - dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); + ice_dev_err_errno(dev, ret, "Cannot restore interrupt scheme"); - ice_peer_refresh_msix(pf); + ice_cdev_info_refresh_msix(pf); clear_bit(ICE_DOWN, pf->state); /* Now perform PF reset and rebuild */ @@ -6582,8 +6903,8 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) err = pci_enable_device_mem(pdev); if (err) { - dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", - err); + ice_dev_err_errno(&pdev->dev, err, + "Cannot re-enable PCI device after reset"); result = PCI_ERS_RESULT_DISCONNECT; } else { pci_set_master(pdev); @@ -6601,8 +6922,8 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) err = pci_aer_clear_nonfatal_status(pdev); if (err) - dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status failed, error %d\n", - err); + ice_dev_dbg_errno(&pdev->dev, err, + "pci_aer_clear_nonfatal_status failed"); /* non-fatal, continue */ return result; @@ -6718,6 +7039,11 @@ static const struct pci_device_id ice_pci_tbl[] = { { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 }, /* required last entry */ { 0, } }; @@ -6779,13 +7105,17 @@ static int __init ice_module_init(void) pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); 
pr_info("%s\n", ice_copyright); - ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; } + ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0); + if (!ice_lag_wq) { + pr_err("Failed to create LAG workqueue\n"); + return -ENOMEM; + } #ifdef HAVE_RHEL7_PCI_DRIVER_RH /* The size member must be initialized in the driver via a call to * set_pci_driver_rh_size before pci_register_driver is called @@ -6800,10 +7130,8 @@ static int __init ice_module_init(void) if (status) { pr_err("failed to register PCI driver, err %d\n", status); destroy_workqueue(ice_wq); + destroy_workqueue(ice_lag_wq); ice_debugfs_exit(); -#if IS_ENABLED(CONFIG_MFD_CORE) - ida_destroy(&ice_peer_index_ida); -#endif } return status; @@ -6820,13 +7148,8 @@ static void __exit ice_module_exit(void) { pci_unregister_driver(&ice_driver); destroy_workqueue(ice_wq); + destroy_workqueue(ice_lag_wq); ice_debugfs_exit(); - /* release all cached layer within ida tree, associated with - * ice_peer_index_ida object - */ -#if IS_ENABLED(CONFIG_MFD_CORE) - ida_destroy(&ice_peer_index_ida); -#endif pr_info("module unloaded\n"); } module_exit(ice_module_exit); @@ -6845,10 +7168,10 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct sockaddr *addr = pi; - enum ice_status status; + u8 old_mac[ETH_ALEN]; u8 flags = 0; - int err = 0; u8 *mac; + int err; mac = (u8 *)addr->sa_data; @@ -6856,7 +7179,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) return -EADDRNOTAVAIL; if (ether_addr_equal(netdev->dev_addr, mac)) { - netdev_warn(netdev, "already using mac %pM\n", mac); + netdev_dbg(netdev, "already using mac %pM\n", mac); return 0; } @@ -6869,55 +7192,59 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) #ifdef HAVE_TC_SETUP_CLSFLOWER if (ice_chnl_dmac_fltr_cnt(pf)) { - netdev_err(netdev, - "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n", + netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n", mac); return -EAGAIN; } #endif /* HAVE_TC_SETUP_CLSFLOWER */ + netif_addr_lock_bh(netdev); + ether_addr_copy(old_mac, netdev->dev_addr); + /* change the netdev's MAC address */ + eth_hw_addr_set(netdev, mac); + netif_addr_unlock_bh(netdev); + /* Clean up old MAC filter. Not an error if old filter doesn't exist */ - status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI); - if (status && status != ICE_ERR_DOES_NOT_EXIST) { + err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); + if (err && err != -ENOENT) { err = -EADDRNOTAVAIL; goto err_update_filters; } /* Add filter for new MAC. If filter exists, return success */ - status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); - if (status == ICE_ERR_ALREADY_EXISTS) { + err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); + if (err == -EEXIST) { /* Although this MAC filter is already present in hardware it's * possible in some cases (e.g. bonding) that dev_addr was * modified outside of the driver and needs to be restored back * to this value. */ - memcpy(netdev->dev_addr, mac, netdev->addr_len); netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); return 0; - } - - /* error if the new filter addition failed */ - if (status) + } else if (err) { + /* error if the new filter addition failed */ err = -EADDRNOTAVAIL; + } err_update_filters: if (err) { netdev_err(netdev, "can't set MAC %pM. 
filter update failed\n", mac); + netif_addr_lock_bh(netdev); + eth_hw_addr_set(netdev, old_mac); + netif_addr_unlock_bh(netdev); return err; } - /* change the netdev's MAC address */ - memcpy(netdev->dev_addr, mac, netdev->addr_len); netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", netdev->dev_addr); /* write new MAC address to the firmware */ flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; - status = ice_aq_manage_mac_write(hw, mac, flags, NULL); - if (status) { - netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n", - mac, ice_stat_str(status)); + err = ice_aq_manage_mac_write(hw, mac, flags, NULL); + if (err) { + netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n", + mac, err); } return 0; } @@ -6960,8 +7287,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - enum ice_status status; u16 q_handle; + int status; u8 tc; /* Validate maxrate requested is within permitted range */ @@ -6981,13 +7308,11 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) else status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, q_handle, ICE_MAX_BW, maxrate * 1000); - if (status) { - netdev_err(netdev, "Unable to set Tx max rate, error %s\n", - ice_stat_str(status)); - return -EIO; - } + if (status) + netdev_err(netdev, "Unable to set Tx max rate, error %d\n", + status); - return 0; + return status; } #endif /* HAVE_NDO_SET_TX_MAXRATE */ @@ -7044,7 +7369,7 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], return err; } -#ifdef HAVE_NDO_FDB_ADD_VID +#ifdef HAVE_NDO_FDB_DEL_EXTACK /** * ice_fdb_del - delete an entry from the hardware database * @ndm: the input from the stack @@ -7052,8 +7377,14 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], * @dev: the net device pointer * @addr: the MAC address entry being added * @vid: VLAN ID + * @extack: netlink extended ack */ static int +ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + __always_unused u16 vid, struct netlink_ext_ack *extack) +#elif defined(HAVE_NDO_FDB_ADD_VID) +static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, __always_unused u16 vid) @@ -7080,7 +7411,7 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], return err; } -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS /** * ice_vsi_cfg_netdev_tc0 - Setup the netdev TC 0 configuration * @vsi: the VSI being configured @@ -7127,7 +7458,6 @@ ice_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) struct ice_pf *pf = parent_vsi->back; struct ice_macvlan *mv = NULL; int avail_id, ret, offset, i; - enum ice_status status; struct device *dev; u8 mac[ETH_ALEN]; @@ -7162,10 +7492,10 @@ ice_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) return ERR_PTR(-EIO); } - pf->num_macvlan++; offset = parent_vsi->alloc_txq + avail_id; +#ifdef HAVE_NETDEV_SB_DEV ret = netdev_set_sb_channel(vdev, avail_id + 1); if (ret) { netdev_err(netdev, "Error setting netdev_set_sb_channel %d\n", @@ -7183,12 +7513,24 @@ ice_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) ret); goto bind_sb_channel_err; } +#endif /* HAVE_NETDEV_SB_DEV */ vsi->netdev = vdev; /* Set MACVLAN ring in root device Tx rings */ ice_for_each_txq(vsi, i) parent_vsi->tx_rings[offset + i] = vsi->tx_rings[i]; + ret 
= ice_vsi_cfg_netdev_tc0(vsi); + if (ret) + goto netdev_tx_cfg_err; + + ret = netif_set_real_num_tx_queues(vdev, vsi->num_txq); + if (ret) { + netdev_err(netdev, "Error setting netif_set_real_num_tx_queues %d\n", + ret); + goto netdev_tx_cfg_err; + } + ice_napi_add(vsi); ret = ice_vsi_open(vsi); @@ -7196,13 +7538,13 @@ ice_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) goto vsi_open_err; ether_addr_copy(mac, vdev->dev_addr); - status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); - if (status == ICE_ERR_ALREADY_EXISTS) { - dev_info(dev, "can't add MAC filters %pM for VSI %d, error %s\n", - mac, vsi->idx, ice_stat_str(status)); - } else if (status) { - dev_err(dev, "can't add MAC filters %pM for VSI %d, error %s\n", - mac, vsi->idx, ice_stat_str(status)); + ret = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); + if (ret == -EEXIST) { + dev_info(dev, "can't add MAC filters %pM for VSI %d, error %d\n", + mac, vsi->idx, ret); + } else if (ret) { + dev_err(dev, "can't add MAC filters %pM for VSI %d, error %d\n", + mac, vsi->idx, ret); ret = -ENOMEM; goto add_mac_err; } @@ -7231,10 +7573,13 @@ add_mac_err: vsi_open_err: ice_napi_del(vsi); vsi->netdev = NULL; +netdev_tx_cfg_err: +#ifdef HAVE_NETDEV_SB_DEV netdev_unbind_sb_channel(netdev, vdev); bind_sb_channel_err: netdev_set_sb_channel(vdev, 0); set_sb_channel_err: +#endif /* HAVE_NETDEV_SB_DEV */ pf->num_macvlan--; ice_vsi_release(vsi); return ERR_PTR(ret); @@ -7253,8 +7598,10 @@ static void ice_fwd_del_macvlan(struct net_device *netdev, void *accel_priv) struct ice_pf *pf = parent_vsi->back; struct net_device *vdev = mv->vdev; +#ifdef HAVE_NETDEV_SB_DEV netdev_unbind_sb_channel(netdev, vdev); netdev_set_sb_channel(vdev, 0); +#endif /* HAVE_NETDEV_SB_DEV */ ice_vsi_release(mv->vsi); parent_vsi->tx_rings[parent_vsi->num_txq + mv->id] = NULL; @@ -7409,17 +7756,23 @@ static void ice_deinit_macvlan(struct ice_vsi *vsi) */ static void ice_vsi_replay_macvlan(struct ice_pf *pf) { +#ifdef HAVE_NETDEV_SB_DEV struct device *dev = ice_pf_to_dev(pf); +#endif /* HAVE_NETDEV_SB_DEV */ struct ice_macvlan *mv, *mv_temp; list_for_each_entry_safe(mv, mv_temp, &pf->macvlan_list, list) { struct ice_vsi *vsi = mv->parent_vsi; int offset = vsi->alloc_txq + mv->id; - int ret = 0, i; +#ifdef HAVE_NETDEV_SB_DEV + int ret; +#endif /* HAVE_NETDEV_SB_DEV */ + int i; ice_for_each_txq(mv->vsi, i) vsi->tx_rings[offset + i] = mv->vsi->tx_rings[i]; +#ifdef HAVE_NETDEV_SB_DEV ret = netdev_set_sb_channel(mv->vdev, mv->id + 1); if (ret) { dev_dbg(dev, "Error setting netdev_set_sb_channel %d\n", @@ -7446,15 +7799,19 @@ static void ice_vsi_replay_macvlan(struct ice_pf *pf) ice_fwd_del_macvlan(mv->parent_vsi->netdev, mv); continue; } +#endif /* HAVE_NETDEV_SB_DEV */ } } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_HW_VLAN_STAG_RX | \ NETIF_F_HW_VLAN_STAG_TX) +#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_STAG_RX) + #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ NETIF_F_HW_VLAN_STAG_FILTER) @@ -7499,25 +7856,38 @@ static netdev_features_t ice_fix_features(struct net_device *netdev, netdev_features_t features) { struct ice_netdev_priv *np = netdev_priv(netdev); - netdev_features_t supported_vlan_filtering; - netdev_features_t requested_vlan_filtering; - struct ice_vsi *vsi = np->vsi; + netdev_features_t req_vlan_fltr, cur_vlan_fltr; + bool cur_ctag, cur_stag, req_ctag, req_stag; - 
requested_vlan_filtering = features & NETIF_VLAN_FILTERING_FEATURES; + cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; + cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; + cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; - /* make sure supported_vlan_filtering works for both SVM and DVM */ - supported_vlan_filtering = NETIF_F_HW_VLAN_CTAG_FILTER; - if (ice_is_dvm_ena(&vsi->back->hw)) - supported_vlan_filtering |= NETIF_F_HW_VLAN_STAG_FILTER; + req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES; + req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; + req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; - if (requested_vlan_filtering && - requested_vlan_filtering != supported_vlan_filtering) { - if (requested_vlan_filtering & NETIF_F_HW_VLAN_CTAG_FILTER) { - netdev_warn(netdev, "cannot support requested VLAN filtering settings, enabling all supported VLAN filtering settings\n"); - features |= supported_vlan_filtering; + if (req_vlan_fltr != cur_vlan_fltr) { + if (ice_is_dvm_ena(&np->vsi->back->hw)) { + if (req_ctag && req_stag) { + features |= NETIF_VLAN_FILTERING_FEATURES; + } else if (!req_ctag && !req_stag) { + features &= ~NETIF_VLAN_FILTERING_FEATURES; + } else if ((!cur_ctag && req_ctag && !cur_stag) || + (!cur_stag && req_stag && !cur_ctag)) { + features |= NETIF_VLAN_FILTERING_FEATURES; + netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n"); + } else if ((cur_ctag && !req_ctag && cur_stag) || + (cur_stag && !req_stag && cur_ctag)) { + features &= ~NETIF_VLAN_FILTERING_FEATURES; + netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n"); + } } else { - netdev_warn(netdev, "cannot support requested VLAN filtering settings, clearing all supported VLAN filtering settings\n"); - features &= ~supported_vlan_filtering; + if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER) + netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n"); + + if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER) + features |= NETIF_F_HW_VLAN_CTAG_FILTER; } } @@ -7528,6 +7898,14 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features) NETIF_F_HW_VLAN_STAG_TX); } + if (!(netdev->features & NETIF_F_RXFCS) && + (features & NETIF_F_RXFCS) && + (features & NETIF_VLAN_STRIPPING_FEATURES) && + !ice_vsi_has_non_zero_vlans(np->vsi)) { + netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); + features &= ~NETIF_VLAN_STRIPPING_FEATURES; + } + return features; } @@ -7621,6 +7999,13 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features) current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES; if (current_vlan_features ^ requested_vlan_features) { + if ((features & NETIF_F_RXFCS) && + (features & NETIF_VLAN_STRIPPING_FEATURES)) { + dev_err(ice_pf_to_dev(vsi->back), + "To enable VLAN stripping, you must first enable FCS/CRC stripping\n"); + return -EIO; + } + err = ice_set_vlan_offload_features(vsi, features); if (err) return err; @@ -7646,37 +8031,53 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features) static int ice_set_features(struct net_device *netdev, netdev_features_t features) { + netdev_features_t changed = netdev->features ^ features; struct ice_netdev_priv *np = 
netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; int ret = 0; /* Don't set any netdev advanced features with device in Safe Mode */ - if (ice_is_safe_mode(vsi->back)) { - dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n"); + if (ice_is_safe_mode(pf)) { + dev_err(ice_pf_to_dev(pf), + "Device is in Safe Mode - not enabling advanced netdev features\n"); return ret; } /* Do not change setting during reset */ if (ice_is_reset_in_progress(pf->state)) { - dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); + dev_err(ice_pf_to_dev(pf), + "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); return -EBUSY; } /* Multiple features can be changed in one call so keep features in * separate if/else statements to guarantee each feature is checked */ - if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) - ice_vsi_manage_rss_lut(vsi, true); - else if (!(features & NETIF_F_RXHASH) && - netdev->features & NETIF_F_RXHASH) - ice_vsi_manage_rss_lut(vsi, false); + if (changed & NETIF_F_RXHASH) + ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH)); ret = ice_set_vlan_features(netdev, features); if (ret) return ret; -#ifdef HAVE_NETDEV_SB_DEV + /* Turn on receive of FCS aka CRC, and after setting this + * flag the packet data will have the 4 byte CRC appended + */ + if (changed & NETIF_F_RXFCS) { + if ((features & NETIF_F_RXFCS) && + (features & NETIF_VLAN_STRIPPING_FEATURES)) { + dev_err(ice_pf_to_dev(vsi->back), + "To disable FCS/CRC stripping, you must first disable VLAN stripping\n"); + return -EIO; + } + + ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS)); + ret = ice_down_up(vsi); + if (ret) + return ret; + } +#ifdef HAVE_NDO_DFWD_OPS if ((features & NETIF_F_HW_L2FW_DOFFLOAD) && !(netdev->features & NETIF_F_HW_L2FW_DOFFLOAD)) { @@ -7687,16 +8088,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) (netdev->features & NETIF_F_HW_L2FW_DOFFLOAD)) { ice_deinit_macvlan(vsi); } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ - if ((features & NETIF_F_NTUPLE) && - !(netdev->features & NETIF_F_NTUPLE)) { - ice_vsi_manage_fdir(vsi, true); - ice_init_arfs(vsi); - } else if (!(features & NETIF_F_NTUPLE) && - (netdev->features & NETIF_F_NTUPLE)) { - ice_vsi_manage_fdir(vsi, false); - ice_clear_arfs(vsi); + if (changed & NETIF_F_NTUPLE) { + bool ena = !!(features & NETIF_F_NTUPLE); + + ice_vsi_manage_fdir(vsi, ena); + ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi); } #ifdef NETIF_F_HW_TC @@ -7706,11 +8104,12 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) return -EACCES; } - if ((features & NETIF_F_HW_TC) && - !(netdev->features & NETIF_F_HW_TC)) - set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); - else - clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); + if (changed & NETIF_F_HW_TC) { + bool ena = !!(features & NETIF_F_HW_TC); + + ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) : + clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); + } #endif /* NETIF_F_HW_TC */ return 0; @@ -7732,11 +8131,7 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi) if (err) return err; - err = ice_vsi_add_vlan_zero(vsi); - if (err) - return err; - - return 0; + return ice_vsi_add_vlan_zero(vsi); } /** @@ -7774,7 +8169,7 @@ int ice_vsi_cfg(struct ice_vsi *vsi) * The ice driver hardware works differently than the hardware that DIMLIB was * originally made for. 
ice hardware doesn't have packet count limits that * can trigger an interrupt, but it *does* have interrupt rate limit support, - * which is hard-coded to limit to 250,000 ints/second. + * which is hard-coded to a limit of 250,000 ints/second. * If not using dynamic moderation, the INTRL value can be modified * by ethtool rx-usecs-high. */ @@ -7785,8 +8180,8 @@ struct ice_dim { u16 itr; }; -/* Make a different profile for RX that doesn't allow quite so aggressive - * moderation at the high end (it maxxes out at 126us or about 8k interrupts a +/* Make a different profile for Rx that doesn't allow quite so aggressive + * moderation at the high end (it maxes out at 126us or about 8k interrupts a * second. */ static const struct ice_dim rx_profile[] = { @@ -7857,7 +8252,8 @@ static void ice_rx_dim_work(struct work_struct *work) * Set up interrupt moderation registers, with the intent to do the right thing * when called from reset or from probe, and whether or not dynamic moderation * is enabled or not. Take special care to write all the registers in both - * dynamic mode or not in order to make sure hardware is in a known state. + * dynamic moderation mode or not in order to make sure hardware is in a known + * state. */ static void ice_init_moderation(struct ice_q_vector *q_vector) { @@ -7889,25 +8285,31 @@ static void ice_init_moderation(struct ice_q_vector *q_vector) ice_set_q_vector_intrl(q_vector); } +/** + * ice_napi_enable - Enable NAPI for a single q_vector of a VSI + * @q_vector: a queue interrupt vector being configured + */ +static void ice_napi_enable(struct ice_q_vector *q_vector) +{ + ice_init_moderation(q_vector); + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_enable(&q_vector->napi); +} + /** * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI * @vsi: the VSI being configured */ static void ice_napi_enable_all(struct ice_vsi *vsi) { - int q_idx; + int v_idx; if (!vsi->netdev) return; - ice_for_each_q_vector(vsi, q_idx) { - struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; - - ice_init_moderation(q_vector); - - if (q_vector->rx.ring || q_vector->tx.ring) - napi_enable(&q_vector->napi); - } + ice_for_each_q_vector(vsi, v_idx) + ice_napi_enable(vsi->q_vectors[v_idx]); } /** @@ -7931,7 +8333,6 @@ static int ice_up_complete(struct ice_vsi *vsi) if (err) return err; - clear_bit(ICE_VSI_DOWN, vsi->state); ice_napi_enable_all(vsi); ice_vsi_ena_irq(vsi); @@ -7939,14 +8340,17 @@ static int ice_up_complete(struct ice_vsi *vsi) if (vsi->port_info && (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && vsi->netdev && vsi->type == ICE_VSI_PF) { + ice_force_phys_link_state(vsi, true); ice_print_link_msg(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); - if (!ice_is_e810(&pf->hw)) - ice_ptp_link_change(pf, pf->hw.pf_id, true); + ice_ptp_link_change(pf, pf->hw.pf_id, true); } - + /* Perform an initial read of the statistics registers now to + * set the baseline so counters are ready when interface is up + */ + ice_update_eth_stats(vsi); if (vsi->type == ICE_VSI_PF) ice_service_task_schedule(pf); @@ -7978,7 +8382,7 @@ int ice_up(struct ice_vsi *vsi) * This function fetches stats from the ring considering the atomic operations * that needs to be performed to read u64 values in 32 bit machine. 
*/ -static void +void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) { unsigned int start; @@ -7997,14 +8401,16 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) /** * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters * @vsi: the VSI to be updated + * @vsi_stats: the stats struct to be updated * @rings: rings to work on * @count: number of rings */ static void -ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings, +ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, + struct rtnl_link_stats64 *vsi_stats, + struct ice_ring **rings, u16 count) { - struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; u16 i; for (i = 0; i < count; i++) { @@ -8015,6 +8421,7 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings, ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); vsi_stats->tx_packets += pkts; vsi_stats->tx_bytes += bytes; + vsi->tx_restart += ring->tx_stats.restart_q; vsi->tx_busy += ring->tx_stats.tx_busy; vsi->tx_linearize += ring->tx_stats.tx_linearize; @@ -8027,15 +8434,12 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings, */ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) { - struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; - u64 pkts, bytes; + struct rtnl_link_stats64 *vsi_stats; int i; - /* reset netdev stats */ - vsi_stats->tx_packets = 0; - vsi_stats->tx_bytes = 0; - vsi_stats->rx_packets = 0; - vsi_stats->rx_bytes = 0; + vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); + if (!vsi_stats) + return; /* reset non-netdev (extended) stats */ vsi->tx_restart = 0; @@ -8050,11 +8454,13 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) rcu_read_lock(); /* update Tx rings counters */ - ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq); + ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, + vsi->num_txq); /* update Rx rings counters */ ice_for_each_rxq(vsi, i) { struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]); + u64 pkts, bytes; ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); vsi_stats->rx_packets += pkts; @@ -8069,11 +8475,18 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) #ifdef HAVE_XDP_SUPPORT /* update XDP Tx rings counters */ if (ice_is_xdp_ena_vsi(vsi)) - ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings, + ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, vsi->num_xdp_txq); #endif /* HAVE_XDP_SUPPORT */ rcu_read_unlock(); + + vsi->net_stats.tx_packets = vsi_stats->tx_packets; + vsi->net_stats.tx_bytes = vsi_stats->tx_bytes; + vsi->net_stats.rx_packets = vsi_stats->rx_packets; + vsi->net_stats.rx_bytes = vsi_stats->rx_bytes; + + kfree(vsi_stats); } /** @@ -8288,7 +8701,6 @@ void ice_update_pf_stats(struct ice_pf *pf) ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, &prev_ps->rx_jabber, &cur_ps->rx_jabber); - cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 
1 : 0; pf->stat_prev_loaded = true; @@ -8327,6 +8739,7 @@ ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) */ if (!test_bit(ICE_VSI_DOWN, vsi->state)) ice_update_vsi_ring_stats(vsi); + stats->tx_packets = vsi_stats->tx_packets; stats->tx_bytes = vsi_stats->tx_bytes; stats->rx_packets = vsi_stats->rx_packets; @@ -8398,26 +8811,25 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) /** * ice_down - Shutdown the connection * @vsi: The VSI being stopped + * + * Caller of this function is expected to set the vsi->state ICE_DOWN bit */ int ice_down(struct ice_vsi *vsi) { int link_err = 0, vlan_err = 0; int i, tx_err, rx_err; - /* Caller of this function is expected to set the - * vsi->state ICE_DOWN bit - */ + WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); + if (vsi->netdev && vsi->type == ICE_VSI_PF) { vlan_err = ice_vsi_del_vlan_zero(vsi); - if (!ice_is_e810(&vsi->back->hw)) - ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); + ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { ice_eswitch_stop_all_tx_queues(vsi->back); } - ice_vsi_dis_irq(vsi); tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); @@ -8462,6 +8874,31 @@ int ice_down(struct ice_vsi *vsi) return 0; } +/** + * ice_down_up - shutdown the VSI connection and bring it up + * @vsi: the VSI to be reconnected + */ +int ice_down_up(struct ice_vsi *vsi) +{ + int ret; + + /* if DOWN already set, nothing to do */ + if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) + return 0; + + ret = ice_down(vsi); + if (ret) + return ret; + + ret = ice_up(vsi); + if (ret) { + netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n"); + return ret; + } + + return 0; +} + /** * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources * @vsi: VSI having resources allocated @@ -8479,13 +8916,15 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) } ice_for_each_txq(vsi, i) { - struct ice_ring *ring = vsi->tx_rings[i]; + struct ice_ring *tx_ring = vsi->tx_rings[i]; - if (!ring) + if (!tx_ring) return -EINVAL; + if (vsi->netdev) - ring->netdev = vsi->netdev; - err = ice_setup_tx_ring(ring); + tx_ring->netdev = vsi->netdev; + + err = ice_setup_tx_ring(tx_ring); if (err) break; } @@ -8614,7 +9053,7 @@ int ice_vsi_open(struct ice_vsi *vsi) goto err_setup_rx; if (vsi->type == ICE_VSI_PF) { -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS unsigned int total_qs = vsi->num_txq; if (test_bit(ICE_FLAG_MACVLAN_ENA, pf->flags)) @@ -8625,7 +9064,7 @@ int ice_vsi_open(struct ice_vsi *vsi) #else /* Notify the stack of the actual queue counts. 
*/ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ if (err) goto err_set_qs; @@ -8686,7 +9125,6 @@ static void ice_vsi_release_all(struct ice_pf *pf) static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) { struct device *dev = ice_pf_to_dev(pf); - enum ice_status status; int i, err; ice_for_each_vsi(pf, i) { @@ -8698,18 +9136,18 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) /* rebuild the VSI */ err = ice_vsi_rebuild(vsi, true); if (err) { - dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", - err, vsi->idx, ice_vsi_type_str(type)); + ice_dev_err_errno(dev, err, + "rebuild VSI failed, VSI index %d, type %s", + vsi->idx, ice_vsi_type_str(type)); return err; } /* replay filters for the VSI */ - status = ice_replay_vsi(&pf->hw, vsi->idx); - if (status) { - dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n", - ice_stat_str(status), vsi->idx, - ice_vsi_type_str(type)); - return -EIO; + err = ice_replay_vsi(&pf->hw, vsi->idx); + if (err) { + dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n", + err, vsi->idx, ice_vsi_type_str(type)); + return err; } /* Re-map HW VSI number, using VSI handle that has been @@ -8720,8 +9158,9 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) /* enable the VSI */ err = ice_ena_vsi(vsi, false); if (err) { - dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", - err, vsi->idx, ice_vsi_type_str(type)); + ice_dev_err_errno(dev, err, + "enable VSI failed, VSI index %d, type %s", + vsi->idx, ice_vsi_type_str(type)); return err; } @@ -8773,7 +9212,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) struct ice_fwlog_user_input user_input = { 0 }; struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status ret; + struct ice_vsi *vsi; bool dvm; int err; @@ -8783,14 +9222,19 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); #define ICE_EMP_RESET_SLEEP 5000 - if (reset_type == ICE_RESET_EMPR) + if (reset_type == ICE_RESET_EMPR) { + /* If an EMP reset has occurred, any previously pending flash + * update will have completed. We no longer know whether or + * not the NVM update EMP reset is restricted. 
+ */ + pf->fw_emp_reset_disabled = false; + msleep(ICE_EMP_RESET_SLEEP); + } - - ret = ice_init_all_ctrlq(hw); - if (ret) { - dev_err(dev, "control queues init failed %s\n", - ice_stat_str(ret)); + err = ice_init_all_ctrlq(hw); + if (err) { + dev_err(dev, "control queues init failed %d\n", err); goto err_init_ctrlq; } @@ -8822,41 +9266,40 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) } } - ret = ice_clear_pf_cfg(hw); - if (ret) { - dev_err(dev, "clear PF configuration failed %s\n", - ice_stat_str(ret)); + /* Restore necessary config for Scalable IOV */ + if (test_bit(ICE_FLAG_SIOV_ENA, pf->flags)) + ice_restore_pasid_config(pf, reset_type); + + err = ice_clear_pf_cfg(hw); + if (err) { + dev_err(dev, "clear PF configuration failed %d\n", err); goto err_init_ctrlq; } - if (pf->first_sw->dflt_vsi_ena) - dev_info(dev, "Clearing default VSI, re-enable after reset completes\n"); - /* clear the default VSI configuration if it exists */ - pf->first_sw->dflt_vsi = NULL; - pf->first_sw->dflt_vsi_ena = false; - ice_clear_pxe_mode(hw); - ret = ice_init_nvm(hw); - if (ret) { - dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret)); + err = ice_init_nvm(hw); + if (err) { + dev_err(dev, "ice_init_nvm failed %d\n", err); goto err_init_ctrlq; } - ret = ice_get_caps(hw); - if (ret) { - dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret)); + err = ice_get_caps(hw); + if (err) { + dev_err(dev, "ice_get_caps failed %d\n", err); goto err_init_ctrlq; } - ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); - if (ret) { - dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret)); + err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false, + NULL); + if (err) { + dev_err(dev, "set_mac_cfg failed %d\n", err); goto err_init_ctrlq; } dvm = ice_is_dvm_ena(hw); + pf->max_qps = num_online_cpus(); err = ice_aq_set_port_params(pf->hw.port_info, 0, false, false, dvm, NULL); if (err) @@ -8866,13 +9309,12 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) if (err) goto err_sched_init_port; - ice_pf_reset_stats(pf); /* start misc vector */ err = ice_req_irq_msix_misc(pf); if (err) { - dev_err(dev, "misc vector setup failed: %d\n", err); + ice_dev_err_errno(dev, err, "misc vector setup failed"); goto err_sched_init_port; } @@ -8898,32 +9340,35 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) * the VSI rebuild. If not, this causes the PTP link status events to * fail. 
*/ - if (test_bit(ICE_FLAG_PTP_ENA, pf->flags)) - ice_ptp_init(pf); + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) + ice_ptp_reset(pf); + if (ice_is_feature_supported(pf, ICE_F_GNSS)) + ice_gnss_init(pf); /* rebuild PF VSI */ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); if (err) { - dev_err(dev, "PF VSI rebuild failed: %d\n", err); + ice_dev_err_errno(dev, err, "PF VSI rebuild failed"); goto err_vsi_rebuild; } - if (ice_is_peer_ena(pf)) { - struct ice_vsi *vsi = ice_get_main_vsi(pf); + /* configure PTP timestamping after VSI rebuild */ + ice_ptp_cfg_timestamp(pf, false); - if (!vsi) { - dev_err(dev, "No PF_VSI to update peer\n"); - goto err_vsi_rebuild; - } - ice_for_each_peer(pf, vsi, ice_peer_update_vsi); + vsi = ice_get_main_vsi(pf); + + if (!vsi) { + dev_err(dev, "No PF_VSI to update aux drivers\n"); + goto err_vsi_rebuild; } -#ifdef HAVE_NETDEV_SB_DEV + ice_cdev_info_update_vsi(ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID), + vsi); +#ifdef HAVE_NDO_DFWD_OPS if (test_bit(ICE_FLAG_MACVLAN_ENA, pf->flags)) { - struct ice_vsi *vsi; - err = ice_vsi_rebuild_by_type(pf, ICE_VSI_OFFLOAD_MACVLAN); if (err) { - dev_err(dev, "MACVLAN VSI rebuild failed: %d\n", err); + ice_dev_err_errno(dev, err, + "MACVLAN VSI rebuild failed"); goto err_vsi_rebuild; } @@ -8941,11 +9386,12 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_vsi_replay_macvlan(pf); } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); if (err) { - dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); + ice_dev_err_errno(dev, err, + "Switchdev CTRL VSI rebuild failed"); goto err_vsi_rebuild; } @@ -8953,8 +9399,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) if (reset_type == ICE_RESET_PFR) { err = ice_rebuild_channels(pf); if (err) { - dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n", - err); + ice_dev_err_errno(dev, err, + "failed to rebuild and replay ADQ VSIs"); goto err_vsi_rebuild; } } @@ -8964,7 +9410,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); if (err) { - dev_err(dev, "control VSI rebuild failed: %d\n", err); + ice_dev_err_errno(dev, err, + "control VSI rebuild failed"); goto err_vsi_rebuild; } @@ -8978,22 +9425,25 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_rebuild_arfs(pf); } - ice_update_pf_netdev_link(pf); ice_config_health_events(pf, true); /* tell the firmware we are up */ - ret = ice_send_version(pf); - if (ret) { - dev_err(dev, "Rebuild failed due to error sending driver version: %s\n", - ice_stat_str(ret)); + err = ice_send_version(pf); + if (err) { + dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", + err); goto err_vsi_rebuild; } ice_replay_post(hw); /* if we get here, reset flow is successful */ clear_bit(ICE_RESET_FAILED, pf->state); +#ifdef HAVE_DEVLINK_HEALTH + ice_devlink_clear_after_reset(pf); +#endif /* HAVE_DEVLINK_HEALTH */ + set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); return; err_vsi_rebuild: @@ -9001,7 +9451,7 @@ err_vsi_rebuild: err_sched_init_port: ice_sched_cleanup_all(hw); err_init_ctrlq: - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, false); set_bit(ICE_RESET_FAILED, pf->state); clear_recovery: /* set this bit in PF state to control service task scheduling */ @@ -9035,7 +9485,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) 
struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; - struct ice_event *event; u8 count = 0; int err = 0; @@ -9056,39 +9505,10 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) } #endif /* HAVE_XDP_SUPPORT */ -#ifdef HAVE_NETDEVICE_MIN_MAX_MTU -#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU - if (new_mtu < netdev->extended->min_mtu) { - netdev_err(netdev, "new MTU invalid. min_mtu is %d\n", - netdev->extended->min_mtu); - return -EINVAL; - } else if (new_mtu > netdev->extended->max_mtu) { - netdev_err(netdev, "new MTU invalid. max_mtu is %d\n", - netdev->extended->min_mtu); - return -EINVAL; - } -#else /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */ - if (new_mtu < (int)netdev->min_mtu) { - netdev_err(netdev, "new MTU invalid. min_mtu is %d\n", - netdev->min_mtu); - return -EINVAL; - } else if (new_mtu > (int)netdev->max_mtu) { - netdev_err(netdev, "new MTU invalid. max_mtu is %d\n", - netdev->min_mtu); - return -EINVAL; - } -#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */ -#else /* HAVE_NETDEVICE_MIN_MAX_MTU */ - if (new_mtu < ETH_MIN_MTU) { - netdev_err(netdev, "new MTU invalid. min_mtu is %d\n", - ETH_MIN_MTU); - return -EINVAL; - } else if (new_mtu > ICE_MAX_MTU) { - netdev_err(netdev, "new MTU invalid. max_mtu is %d\n", - ICE_MAX_MTU); - return -EINVAL; - } -#endif /* HAVE_NETDEVICE_MIN_MAX_MTU */ + err = ice_check_mtu_valid(netdev, new_mtu); + if (err) + return err; + /* if a reset is in progress, wait for some time for it to complete */ do { if (ice_is_reset_in_progress(pf->state)) { @@ -9105,10 +9525,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) return -EBUSY; } - event = kzalloc(sizeof(*event), GFP_KERNEL); - if (!event) - return -ENOMEM; - netdev->mtu = (unsigned int)new_mtu; /* if VSI is up, bring it down and then back up */ @@ -9116,38 +9532,29 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) err = ice_down(vsi); if (err) { netdev_err(netdev, "change MTU if_down err %d\n", err); - goto free_event; + return err; } err = ice_up(vsi); if (err) { netdev_err(netdev, "change MTU if_up err %d\n", err); - goto free_event; + return err; } } - if (ice_is_safe_mode(pf)) - goto out; - - set_bit(ICE_EVENT_MTU_CHANGE, event->type); - event->reporter = NULL; - event->info.mtu = (u16)new_mtu; - ice_for_each_peer(pf, event, ice_peer_check_for_reg); - -out: netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); -free_event: - kfree(event); - return err; + set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); + + return 0; } /** - * ice_do_ioctl - Access the hwtstamp interface + * ice_eth_ioctl - Access the hwtstamp interface * @netdev: network interface device structure * @ifr: interface request data * @cmd: ioctl command */ -static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_pf *pf = np->vsi->back; @@ -9236,78 +9643,6 @@ const char *ice_aq_str(enum ice_aq_err aq_err) return "ICE_AQ_RC_UNKNOWN"; } -/** - * ice_stat_str - convert status err code to a string - * @stat_err: the status error code to convert - */ -const char *ice_stat_str(enum ice_status stat_err) -{ - switch (stat_err) { - case ICE_SUCCESS: - return "OK"; - case ICE_ERR_PARAM: - return "ICE_ERR_PARAM"; - case ICE_ERR_NOT_IMPL: - return "ICE_ERR_NOT_IMPL"; - case ICE_ERR_NOT_READY: - return "ICE_ERR_NOT_READY"; - case ICE_ERR_NOT_SUPPORTED: - return "ICE_ERR_NOT_SUPPORTED"; - 
case ICE_ERR_BAD_PTR: - return "ICE_ERR_BAD_PTR"; - case ICE_ERR_INVAL_SIZE: - return "ICE_ERR_INVAL_SIZE"; - case ICE_ERR_DEVICE_NOT_SUPPORTED: - return "ICE_ERR_DEVICE_NOT_SUPPORTED"; - case ICE_ERR_RESET_FAILED: - return "ICE_ERR_RESET_FAILED"; - case ICE_ERR_FW_API_VER: - return "ICE_ERR_FW_API_VER"; - case ICE_ERR_NO_MEMORY: - return "ICE_ERR_NO_MEMORY"; - case ICE_ERR_CFG: - return "ICE_ERR_CFG"; - case ICE_ERR_OUT_OF_RANGE: - return "ICE_ERR_OUT_OF_RANGE"; - case ICE_ERR_ALREADY_EXISTS: - return "ICE_ERR_ALREADY_EXISTS"; - case ICE_ERR_NVM: - return "ICE_ERR_NVM"; - case ICE_ERR_NVM_CHECKSUM: - return "ICE_ERR_NVM_CHECKSUM"; - case ICE_ERR_BUF_TOO_SHORT: - return "ICE_ERR_BUF_TOO_SHORT"; - case ICE_ERR_NVM_BLANK_MODE: - return "ICE_ERR_NVM_BLANK_MODE"; - case ICE_ERR_IN_USE: - return "ICE_ERR_IN_USE"; - case ICE_ERR_MAX_LIMIT: - return "ICE_ERR_MAX_LIMIT"; - case ICE_ERR_RESET_ONGOING: - return "ICE_ERR_RESET_ONGOING"; - case ICE_ERR_HW_TABLE: - return "ICE_ERR_HW_TABLE"; - case ICE_ERR_DOES_NOT_EXIST: - return "ICE_ERR_DOES_NOT_EXIST"; - case ICE_ERR_FW_DDP_MISMATCH: - return "ICE_ERR_FW_DDP_MISMATCH"; - case ICE_ERR_AQ_ERROR: - return "ICE_ERR_AQ_ERROR"; - case ICE_ERR_AQ_TIMEOUT: - return "ICE_ERR_AQ_TIMEOUT"; - case ICE_ERR_AQ_FULL: - return "ICE_ERR_AQ_FULL"; - case ICE_ERR_AQ_NO_WORK: - return "ICE_ERR_AQ_NO_WORK"; - case ICE_ERR_AQ_EMPTY: - return "ICE_ERR_AQ_EMPTY"; - case ICE_ERR_AQ_FW_CRITICAL: - return "ICE_ERR_AQ_FW_CRITICAL"; - } - - return "ICE_ERR_UNKNOWN"; -} - /** * ice_set_rss_lut - Set RSS LUT * @vsi: Pointer to VSI structure @@ -9320,7 +9655,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) { struct ice_aq_get_set_rss_lut_params params = {}; struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!lut) return -EINVAL; @@ -9333,14 +9668,11 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) params.global_lut_id = *vsi->global_lut_id; status = ice_aq_set_rss_lut(hw, ¶ms); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -9353,20 +9685,17 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) { struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!seed) return -EINVAL; status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -9381,7 +9710,7 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) { struct ice_aq_get_set_rss_lut_params params = {}; struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!lut) return -EINVAL; @@ -9394,14 +9723,11 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) params.global_lut_id = *vsi->global_lut_id; status = ice_aq_get_rss_lut(hw, ¶ms); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n", - 
ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -9414,20 +9740,17 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) { struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!seed) return -EINVAL; status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -9486,8 +9809,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) struct ice_aqc_vsi_props *vsi_props; struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; vsi_props = &vsi->info; @@ -9508,12 +9830,10 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n", - bmode, ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n", + bmode, ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } /* Update sw flags for book keeping */ @@ -9553,9 +9873,8 @@ static int ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) struct ice_pf *pf = np->vsi->back; struct nlattr *attr, *br_spec; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct ice_sw *pf_sw; - int rem, v, err = 0; + int rem, v, err; pf_sw = pf->first_sw; /* find the attribute in the netlink message */ @@ -9587,14 +9906,14 @@ static int ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) /* Update the unicast switch filter rules for the corresponding * switch of the netdev */ - status = ice_update_sw_rule_bridge_mode(hw); - if (status) { - netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n", - mode, ice_stat_str(status), + err = ice_update_sw_rule_bridge_mode(hw); + if (err) { + netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n", + mode, err, ice_aq_str(hw->adminq.sq_last_status)); /* revert hw->evb_veb */ hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); - return -EIO; + return err; } pf_sw->bridge_mode = mode; @@ -9713,8 +10032,10 @@ static void ice_tx_timeout(struct net_device *netdev) ice_service_task_schedule(pf); pf->tx_timeout_recovery_level++; + } +#ifndef HAVE_UDP_TUNNEL_NIC_INFO /** * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up * @netdev: This physical port's netdev @@ -9722,6 +10043,11 @@ static void ice_tx_timeout(struct net_device *netdev) */ static void __maybe_unused ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti) +#else /* !HAVE_UDP_TUNNEL_NIC_INFO */ +static int +ice_udp_tunnel_add(struct net_device *netdev, unsigned int table, + unsigned 
int idx, struct udp_tunnel_info *ti) +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_tnl_entry *tnl_entry; @@ -9729,11 +10055,15 @@ ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti) struct ice_pf *pf = vsi->back; enum ice_tunnel_type tnl_type; u16 port = ntohs(ti->port); - enum ice_status status; + int status; if (ice_dcf_is_udp_tunnel_capable(&pf->hw)) { netdev_info(netdev, "Cannot config tunnel, the capability is used by DCF\n"); +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + return -EINVAL; +#else return; +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ } switch (ti->type) { @@ -9745,14 +10075,22 @@ ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti) break; default: netdev_err(netdev, "Unknown tunnel type\n"); +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + return -EINVAL; +#else return; +#endif } status = ice_is_create_tunnel_possible(&pf->hw, tnl_type, port); - if (status == ICE_ERR_OUT_OF_RANGE) { + if (status == -EIO) { netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n", port); +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + return -EPERM; +#else return; +#endif } spin_lock(&pf->tnl_lock); @@ -9767,7 +10105,11 @@ ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti) sizeof(*tnl_entry), GFP_ATOMIC); if (!tnl_entry) { spin_unlock(&pf->tnl_lock); +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + return -ENOMEM; +#else return; +#endif } tnl_entry->type = tnl_type; tnl_entry->port = port; @@ -9780,8 +10122,12 @@ ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti) /* kick the service_task so that it can create the tunnel */ ice_service_task_schedule(vsi->back); +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + return 0; +#endif } +#ifndef HAVE_UDP_TUNNEL_NIC_INFO /** * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away * @netdev: This physical port's netdev @@ -9789,6 +10135,11 @@ ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti) */ static void __maybe_unused ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti) +#else /* !HAVE_UDP_TUNNEL_NIC_INFO */ +static int +ice_udp_tunnel_del(struct net_device *netdev, unsigned int table, + unsigned int idx, struct udp_tunnel_info *ti) +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -9796,10 +10147,17 @@ ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti) enum ice_tunnel_type tnl_type; struct ice_tnl_entry *entry; u16 port = ntohs(ti->port); +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + int ret = 0; +#endif if (ice_dcf_is_udp_tunnel_capable(&pf->hw)) { netdev_info(netdev, "Cannot config tunnel, the capability is used by DCF\n"); +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + return -EINVAL; +#else return; +#endif } switch (ti->type) { @@ -9811,7 +10169,11 @@ ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti) break; default: netdev_err(netdev, "Unknown tunnel type\n"); +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + return -EINVAL; +#else return; +#endif } spin_lock(&pf->tnl_lock); @@ -9829,11 +10191,17 @@ ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti) } else { netdev_err(netdev, "Unable to find Tunnel, port %u, tnl_type %u\n", port, tnl_type); +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + ret = -EINVAL; +#endif } spin_unlock(&pf->tnl_lock); /* kick the service_task so that it can destroy the tunnel */ ice_service_task_schedule(vsi->back); +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + return ret; +#endif } 
#if defined(HAVE_VXLAN_RX_OFFLOAD) && !defined(HAVE_UDP_ENC_RX_OFFLOAD) @@ -9970,12 +10338,11 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np, static int ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { - struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv; + struct ice_netdev_priv *np = cb_priv; switch (type) { case TC_SETUP_CLSFLOWER: return ice_setup_tc_cls_flower(np, np->vsi->netdev, - (struct flow_cls_offload *) type_data); default: return -EOPNOTSUPP; @@ -10024,41 +10391,29 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi, if (!qcount) return -EINVAL; - if (!i && !is_power_of_2(qcount)) { - dev_err(dev, "TC0:qcount[%d] must be a power of 2\n", - qcount); - return -EINVAL; - } else if (non_power_of_2_qcount) { - if (qcount > non_power_of_2_qcount) { - dev_err(dev, "TC%d:qcount[%d] > non_power_of_2_qcount [%d]\n", - i, qcount, non_power_of_2_qcount); + if (is_power_of_2(qcount)) { + if (non_power_of_2_qcount && + qcount > non_power_of_2_qcount) { + dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n", + qcount, non_power_of_2_qcount); return -EINVAL; - } else if (qcount < non_power_of_2_qcount) { - /* it must be power of 2, otherwise fail */ - if (!is_power_of_2(qcount)) { - dev_err(dev, "qcount must be a power of 2, TC%d: qcnt[%d] < non_power_of_2_qcount [%d]\n", - i, qcount, - non_power_of_2_qcount); - return -EINVAL; - } } - } else if (!is_power_of_2(qcount)) { - /* after tc0, next TCs qcount can be non-power of 2, - * if so, set channel RSS size to be the count of that - * TC - */ - non_power_of_2_qcount = qcount; - max_rss_q_cnt = qcount; - dev_dbg(dev, "TC%d:count[%d] non power of 2\n", i, - qcount); - } - - /* figure out max_rss_q_cnt based on TC's qcount */ - if (max_rss_q_cnt) { if (qcount > max_rss_q_cnt) max_rss_q_cnt = qcount; } else { + if (non_power_of_2_qcount && + qcount != non_power_of_2_qcount) { + dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n", + qcount, non_power_of_2_qcount); + return -EINVAL; + } + if (qcount < max_rss_q_cnt) { + dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n", + qcount, max_rss_q_cnt); + return -EINVAL; + } max_rss_q_cnt = qcount; + non_power_of_2_qcount = qcount; } /* Convert input bandwidth from Bytes/s to Kbps */ @@ -10105,10 +10460,7 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi, (mqprio_qopt->qopt.offset[i] + qcount)) return -EINVAL; } - if (vsi->num_rxq < - (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) - return -EINVAL; - if (vsi->num_txq < + if (pf->max_adq_qps < (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) return -EINVAL; @@ -10149,7 +10501,6 @@ static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) { enum ice_block blk = ICE_BLK_FD; struct ice_fd_hw_prof *prof; - enum ice_status status; u64 entry_h; int tun; @@ -10159,6 +10510,7 @@ static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { enum ice_flow_priority prio; + int status; u64 prof_id; /* add this VSI to FDir profile for this flow */ @@ -10243,11 +10595,6 @@ static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) /* set/clear inline flow-director bits for newly created VSI based * on PF level private flags */ - if (test_bit(ICE_FLAG_CHNL_INLINE_FD_ENA, pf->flags)) - set_bit(ICE_CHNL_FEATURE_INLINE_FD_ENA, vsi->features); - else - clear_bit(ICE_CHNL_FEATURE_INLINE_FD_ENA, 
vsi->features); - if (test_bit(ICE_FLAG_CHNL_INLINE_FD_MARK_ENA, pf->flags)) set_bit(ICE_CHNL_FEATURE_INLINE_FD_MARK_ENA, vsi->features); else @@ -10269,8 +10616,6 @@ static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) ch->vsi_num = vsi->vsi_num; ch->info.mapping_flags = vsi->info.mapping_flags; ch->ch_vsi = vsi; - /* initialize filter type to be INVALID */ - ch->fltr_type = ICE_CHNL_FLTR_TYPE_INVALID; /* set the back pointer of channel for newly created VSI */ vsi->ch = ch; @@ -10293,7 +10638,6 @@ static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch) { int i; - for (i = 0; i < ch->num_txq; i++) { struct ice_q_vector *tx_q_vector, *rx_q_vector; struct ice_ring *tx_ring, *rx_ring; @@ -10453,10 +10797,9 @@ ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, /** * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate - * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate * @vsi: VSI to be configured * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit - * @min_tx_rate: min Tx rate in Kbps to be configured as mimimum BW limit + * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit */ static int ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate) @@ -10467,11 +10810,7 @@ ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate) if (err) return err; - err = ice_set_max_bw_limit(vsi, max_tx_rate); - if (err) - return err; - - return 0; + return ice_set_max_bw_limit(vsi, max_tx_rate); } /** @@ -10497,7 +10836,7 @@ static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch) } if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) { - dev_err(dev, "Error: cnt_q_avail (%u) less than num_queues %d\n", + dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n", vsi->cnt_q_avail, ch->num_txq); return -EINVAL; } @@ -10543,7 +10882,7 @@ static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) &pf->tc_flower_fltr_list, tc_flower_node) { struct ice_rule_query_data rule; - enum ice_status status; + int status; /* for now process only channel specific filters */ if (!ice_is_chnl_fltr(fltr)) @@ -10551,16 +10890,14 @@ static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) rule.rid = fltr->rid; rule.rule_id = fltr->rule_id; - rule.vsi_handle = fltr->dest_id; + rule.vsi_handle = fltr->dest_vsi_handle; status = ice_rem_adv_rule_by_id(&pf->hw, &rule); if (status) { - if (status == ICE_ERR_DOES_NOT_EXIST) - dev_dbg(ice_pf_to_dev(pf), - "TC flower filter (rule_id %u) does not exist\n", + if (status == -ENOENT) + dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n", rule.rule_id); else - dev_err(ice_pf_to_dev(pf), - "failed to delete TC flower filter, status %d\n", + dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n", status); } else if (fltr->dest_vsi) { /* update advanced switch filter count */ @@ -10580,7 +10917,6 @@ static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) } #endif /* HAVE_TC_SETUP_CLSFLOWER */ - /** * ice_remove_q_channels - Remove queue channels for the TCs * @vsi: VSI to be configured @@ -10605,6 +10941,15 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, ice_rem_all_chnl_fltrs(pf); #endif /* HAVE_TC_SETUP_CLSFLOWER */ + /* remove ntuple filters since queue configuration is being changed */ + if (vsi->netdev->features & NETIF_F_NTUPLE) { + struct ice_hw *hw = &pf->hw; + + mutex_lock(&hw->fdir_fltr_lock); + ice_fdir_del_all_fltrs(vsi); + 
mutex_unlock(&hw->fdir_fltr_lock); + } + /* perform cleanup for channels if they exist */ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { struct ice_vsi *ch_vsi; @@ -10637,7 +10982,7 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, /* Release FD resources for the channel VSI */ ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); - /* clear the VSI from schedular tree */ + /* clear the VSI from scheduler tree */ ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx); /* Delete VSI from FW */ @@ -10683,7 +11028,6 @@ static int ice_rebuild_channels(struct ice_pf *pf) main_vsi->old_numtc == 1) return 0; /* nothing to be done */ - /* reconfigure main VSI based on old value of TC and cached values * for MQPRIO opts */ @@ -10707,8 +11051,9 @@ static int ice_rebuild_channels(struct ice_pf *pf) /* rebuild ADQ VSI */ err = ice_vsi_rebuild(vsi, true); if (err) { - dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n", - ice_vsi_type_str(type), vsi->idx, err); + ice_dev_err_errno(dev, err, + "VSI (type:%s) at index %d rebuild failed", + ice_vsi_type_str(type), vsi->idx); goto cleanup; } @@ -10734,7 +11079,7 @@ static int ice_rebuild_channels(struct ice_pf *pf) main_vsi->tc_map_vsi[tc_idx++] = vsi; } - /* ADQ VSI(s) has been rebuild successfully, so setup + /* ADQ VSI(s) has been rebuilt successfully, so setup * channel for main VSI's Tx and Rx rings */ list_for_each_entry(ch, &main_vsi->ch_list, list) { @@ -10747,16 +11092,17 @@ static int ice_rebuild_channels(struct ice_pf *pf) /* reconfig channel resources */ ice_cfg_chnl_all_res(main_vsi, ch); - /* replay BW rate limit it it is non-zero */ + /* replay BW rate limit if it is non-zero */ if (!ch->max_tx_rate && !ch->min_tx_rate) continue; err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate, ch->min_tx_rate); if (err) - dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n", - err, ch->max_tx_rate, ch->min_tx_rate, - ch_vsi->vsi_num); + ice_dev_err_errno(dev, err, + "failed to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)", + ch->max_tx_rate, ch->min_tx_rate, + ch_vsi->vsi_num); else dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n", ch->max_tx_rate, ch->min_tx_rate, @@ -10818,7 +11164,7 @@ static int ice_cfg_q_channels(struct ice_vsi *vsi) list_add_tail(&ch->list, &vsi->ch_list); vsi->tc_map_vsi[i] = ch->ch_vsi; dev_dbg(ice_pf_to_dev(pf), - "successfully created channel: VSI %pK\n", ch->ch_vsi); + "successfully created channel: VSI %p\n", ch->ch_vsi); } return ret; @@ -10852,6 +11198,7 @@ static int ice_setup_tc_qdisc(struct net_device *netdev, void *type_data) mode = mqprio_qopt->mode; if (!hw) { clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); + pf->max_qps = num_online_cpus(); vsi->ch_rss_size = 0; memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); goto config_tcf; @@ -10879,6 +11226,7 @@ static int ice_setup_tc_qdisc(struct net_device *netdev, void *type_data) } memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); set_bit(ICE_FLAG_TC_MQPRIO, pf->flags); + pf->max_qps = pf->max_adq_qps; /* don't assume state of hw_tc_offload during driver load * and set the flag for TC flower filter if hw_tc_offload * already ON @@ -11003,6 +11351,13 @@ config_tcf: if (vsi->ch_rss_size) ice_vsi_cfg_rss_lut_key(vsi); +#ifdef HAVE_DEVLINK_PARAMS + if (hw) + ret = ice_devlink_tc_params_register(vsi); + else + ice_devlink_tc_params_unregister(vsi); +#endif /* 
HAVE_DEVLINK_PARAMS */ + exit: /* if error, reset the all_numtc and all_enatc */ if (ret) { @@ -11076,13 +11431,13 @@ ice_setup_tc(struct net_device *netdev, u32 __always_unused handle, } } } -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS if (ice_is_offloaded_macvlan_ena(pf)) { netdev_err(netdev, "TC_SETUP_QDISC_MQPRIO not supported when MACVLAN offloade support is ON. Turn off MACVLAN offload support thru ethtool and try again\n"); mutex_unlock(&pf->tc_mutex); return -EOPNOTSUPP; } -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ if (ice_is_dcf_enabled(pf)) { netdev_err(netdev, "TC_SETUP_QDISC_MQPRIO not supported when Device Control Functionality is enabled.\n"); mutex_unlock(&pf->tc_mutex); @@ -11123,9 +11478,11 @@ ice_indr_block_priv_lookup(struct ice_netdev_priv *np, { struct ice_indr_block_priv *cb_priv; +#ifndef HAVE_FLOW_INDR_BLOCK_LOCK /* All callback list access should be protected by RTNL. */ ASSERT_RTNL(); +#endif /* HAVE_FLOW_INDR_BLOCK_LOCK */ list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) { if (!cb_priv->netdev) return NULL; @@ -11177,7 +11534,10 @@ ice_indr_setup_tc_block(struct net_device *netdev, struct ice_netdev_priv *np, int tunnel_type = ice_tc_tun_get_type(netdev, NULL); if (tunnel_type != TNL_VXLAN && tunnel_type != TNL_GENEVE && - !(is_vlan_dev(netdev) && +#ifdef HAVE_GTP_SUPPORT + tunnel_type != TNL_GTPU && tunnel_type != TNL_GTPC && +#endif /* HAVE_GTP_SUPPORT */ + tunnel_type != TNL_GRETAP && !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == np->vsi->netdev)) return -EOPNOTSUPP; #endif @@ -11311,8 +11671,9 @@ ice_indr_register_block(struct ice_netdev_priv *np, struct net_device *netdev) err = __flow_indr_block_cb_register(netdev, np, ice_indr_setup_tc_cb, np); if (err) { - dev_err(ice_pf_to_dev(pf), "Failed to register remote block notifier for %s err=%d\n", - netdev_name(netdev), err); + ice_dev_err_errno(ice_pf_to_dev(pf), err, + "Failed to register remote block notifier for %s", + netdev_name(netdev)); } return err; } @@ -11343,6 +11704,9 @@ ice_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) int tunnel_type = ice_tc_tun_get_type(netdev, NULL); if (tunnel_type != TNL_VXLAN && tunnel_type != TNL_GENEVE && +#ifdef HAVE_GTP_SUPPORT + tunnel_type != TNL_GTPU && tunnel_type != TNL_GTPC && +#endif /* HAVE_GTP_SUPPORT */ !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == np->vsi->netdev)) return NOTIFY_OK; @@ -11401,7 +11765,6 @@ int ice_open_internal(struct net_device *netdev) struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_port_info *pi; - enum ice_status status; int err; /* disallow open if eeprom is corrupted */ @@ -11416,14 +11779,14 @@ int ice_open_internal(struct net_device *netdev) netif_carrier_off(netdev); pi = vsi->port_info; - status = ice_update_link_info(pi); - if (status) { - netdev_err(netdev, "Failed to get link info, error %s\n", - ice_stat_str(status)); - return -EIO; + err = ice_update_link_info(pi); + if (err) { + netdev_err(netdev, "Failed to get link info, error %d\n", + err); + return err; } - ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); + ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); /* Set PHY if there is media, otherwise, turn off PHY */ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { @@ -11510,6 +11873,7 @@ ice_features_check(struct sk_buff *skb, struct net_device __always_unused *netdev, netdev_features_t features) { + bool gso = skb_is_gso(skb); size_t len; /* No point in doing any of this if 
neither checksum nor GSO are @@ -11522,24 +11886,32 @@ ice_features_check(struct sk_buff *skb, /* We cannot support GSO if the MSS is going to be less than * 64 bytes. If it is then we need to drop support for GSO. */ - if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS)) features &= ~NETIF_F_GSO_MASK; - len = skb_network_header(skb) - skb->data; + len = skb_network_offset(skb); if (len > ICE_TXD_MACLEN_MAX || len & 0x1) goto out_rm_features; - len = skb_transport_header(skb) - skb_network_header(skb); + len = skb_network_header_len(skb); if (len > ICE_TXD_IPLEN_MAX || len & 0x1) goto out_rm_features; if (skb->encapsulation) { - len = skb_inner_network_header(skb) - skb_transport_header(skb); - if (len > ICE_TXD_L4LEN_MAX || len & 0x1) - goto out_rm_features; + /* this must work for VXLAN frames AND IPIP/SIT frames, and in + * the case of IPIP frames, the transport header pointer is + * after the inner header! So check to make sure that this + * is a GRE or UDP_TUNNEL frame before doing that math. + */ + if (gso && (skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) { + len = skb_inner_network_header(skb) - + skb_transport_header(skb); + if (len > ICE_TXD_L4LEN_MAX || len & 0x1) + goto out_rm_features; + } - len = skb_inner_transport_header(skb) - - skb_inner_network_header(skb); + len = skb_inner_network_header_len(skb); if (len > ICE_TXD_IPLEN_MAX || len & 0x1) goto out_rm_features; } @@ -11563,12 +11935,20 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = { #endif .ndo_get_stats64 = ice_get_stats64, .ndo_tx_timeout = ice_tx_timeout, +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF + .ndo_bpf = ice_xdp_safe_mode, +#else + .ndo_xdp = ice_xdp_safe_mode, +#endif /* HAVE_NDO_BPF */ +#endif /* HAVE_XDP_SUPPORT */ }; static const struct net_device_ops ice_netdev_ops = { .ndo_open = ice_open, .ndo_stop = ice_stop, .ndo_start_xmit = ice_start_xmit, + .ndo_select_queue = ice_select_queue, #ifdef HAVE_NDO_FEATURES_CHECK .ndo_features_check = ice_features_check, #endif /* HAVE_NDO_FEATURES_CHECK */ @@ -11594,7 +11974,11 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_set_tx_maxrate = ice_set_tx_maxrate, #endif /* HAVE_RHEL7_EXTENDED_NDO_SET_TX_MAXRATE */ #endif /* HAVE_NDO_SET_TX_MAXRATE */ - .ndo_do_ioctl = ice_do_ioctl, +#ifdef HAVE_NDO_ETH_IOCTL + .ndo_eth_ioctl = ice_eth_ioctl, +#else + .ndo_do_ioctl = ice_eth_ioctl, +#endif /* HAVE_NDO_ETH_IOCTL */ .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, #ifdef HAVE_NDO_SET_VF_TRUST .ndo_set_vf_mac = ice_set_vf_mac, @@ -11647,7 +12031,9 @@ static const struct net_device_ops ice_netdev_ops = { #else .ndo_xdp = ice_xdp, #endif /* HAVE_NDO_BPF */ +#ifndef CONFIG_ICE_USE_SKB .ndo_xdp_xmit = ice_xdp_xmit, +#endif /* CONFIG_ICE_USE_SKB */ #ifndef NO_NDO_XDP_FLUSH .ndo_xdp_flush = ice_xdp_flush, #endif /* !NO_NDO_XDP_FLUSH */ @@ -11683,7 +12069,7 @@ static const struct net_device_ops ice_netdev_ops = { #endif #endif /* HAVE_GENEVE_RX_OFFLOAD */ #endif /* HAVE_UDP_ENC_RX_OFFLOAD */ -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS #ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT .extended.ndo_dfwd_add_station = ice_fwd_add_macvlan, .extended.ndo_dfwd_del_station = ice_fwd_del_macvlan, @@ -11691,5 +12077,10 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_dfwd_add_station = ice_fwd_add_macvlan, .ndo_dfwd_del_station = ice_fwd_del_macvlan, #endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ +#if 
IS_ENABLED(CONFIG_NET_DEVLINK) +#ifdef HAVE_NDO_GET_DEVLINK_PORT + .ndo_get_devlink_port = ice_get_devlink_port, +#endif /* HAVE_NDO_GET_DEVLINK_PORT */ +#endif /* CONFIG_NET_DEVLINK */ }; diff --git a/drivers/thirdparty/ice/ice_metainit.c b/drivers/thirdparty/ice/ice_metainit.c new file mode 100644 index 000000000000..c6700710dce5 --- /dev/null +++ b/drivers/thirdparty/ice/ice_metainit.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#include "ice_common.h" +#include "ice_parser_util.h" + +#define ICE_METAINIT_TABLE_SIZE 16 + +/** + * ice_metainit_dump - dump an metainit item info + * @hw: pointer to the hardware structure + * @item: metainit item to dump + */ +void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item) +{ + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); + dev_info(ice_hw_to_dev(hw), "tsr = %d\n", item->tsr); + dev_info(ice_hw_to_dev(hw), "ho = %d\n", item->ho); + dev_info(ice_hw_to_dev(hw), "pc = %d\n", item->pc); + dev_info(ice_hw_to_dev(hw), "pg_rn = %d\n", item->pg_rn); + dev_info(ice_hw_to_dev(hw), "cd = %d\n", item->cd); + dev_info(ice_hw_to_dev(hw), "gpr_a_ctrl = %d\n", item->gpr_a_ctrl); + dev_info(ice_hw_to_dev(hw), "gpr_a_data_mdid = %d\n", + item->gpr_a_data_mdid); + dev_info(ice_hw_to_dev(hw), "gpr_a_data_start = %d\n", + item->gpr_a_data_start); + dev_info(ice_hw_to_dev(hw), "gpr_a_data_len = %d\n", + item->gpr_a_data_len); + dev_info(ice_hw_to_dev(hw), "gpr_a_id = %d\n", item->gpr_a_id); + dev_info(ice_hw_to_dev(hw), "gpr_b_ctrl = %d\n", item->gpr_b_ctrl); + dev_info(ice_hw_to_dev(hw), "gpr_b_data_mdid = %d\n", + item->gpr_b_data_mdid); + dev_info(ice_hw_to_dev(hw), "gpr_b_data_start = %d\n", + item->gpr_b_data_start); + dev_info(ice_hw_to_dev(hw), "gpr_b_data_len = %d\n", + item->gpr_b_data_len); + dev_info(ice_hw_to_dev(hw), "gpr_b_id = %d\n", item->gpr_b_id); + dev_info(ice_hw_to_dev(hw), "gpr_c_ctrl = %d\n", item->gpr_c_ctrl); + dev_info(ice_hw_to_dev(hw), "gpr_c_data_mdid = %d\n", + item->gpr_c_data_mdid); + dev_info(ice_hw_to_dev(hw), "gpr_c_data_start = %d\n", + item->gpr_c_data_start); + dev_info(ice_hw_to_dev(hw), "gpr_c_data_len = %d\n", + item->gpr_c_data_len); + dev_info(ice_hw_to_dev(hw), "gpr_c_id = %d\n", item->gpr_c_id); + dev_info(ice_hw_to_dev(hw), "gpr_d_ctrl = %d\n", item->gpr_d_ctrl); + dev_info(ice_hw_to_dev(hw), "gpr_d_data_mdid = %d\n", + item->gpr_d_data_mdid); + dev_info(ice_hw_to_dev(hw), "gpr_d_data_start = %d\n", + item->gpr_d_data_start); + dev_info(ice_hw_to_dev(hw), "gpr_d_data_len = %d\n", + item->gpr_d_data_len); + dev_info(ice_hw_to_dev(hw), "gpr_d_id = %d\n", item->gpr_d_id); + dev_info(ice_hw_to_dev(hw), "flags = 0x%llx\n", + (unsigned long long)(item->flags)); +} + +/** The function parses a 192 bits Metadata Init entry with below format: + * BIT 0-7: TCAM Search Key Register (mi->tsr) + * BIT 8-16: Header Offset (mi->ho) + * BIT 17-24: Program Counter (mi->pc) + * BIT 25-35: Parse Graph Root Node (mi->pg_rn) + * BIT 36-38: Control Domain (mi->cd) + * BIT 39: GPR_A Data Control (mi->gpr_a_ctrl) + * BIT 40-44: GPR_A MDID.ID (mi->gpr_a_data_mdid) + * BIT 45-48: GPR_A MDID.START (mi->gpr_a_data_start) + * BIT 49-53: GPR_A MDID.LEN (mi->gpr_a_data_len) + * BIT 54-55: reserved + * BIT 56-59: GPR_A ID (mi->gpr_a_id) + * BIT 60: GPR_B Data Control (mi->gpr_b_ctrl) + * BIT 61-65: GPR_B MDID.ID (mi->gpr_b_data_mdid) + * BIT 66-69: GPR_B MDID.START (mi->gpr_b_data_start) + * BIT 70-74: GPR_B MDID.LEN (mi->gpr_b_data_len) + * BIT 75-76: reserved + 
* BIT 77-80: GPR_B ID (mi->gpr_a_id) + * BIT 81: GPR_C Data Control (mi->gpr_c_ctrl) + * BIT 82-86: GPR_C MDID.ID (mi->gpr_c_data_mdid) + * BIT 87-90: GPR_C MDID.START (mi->gpr_c_data_start) + * BIT 91-95: GPR_C MDID.LEN (mi->gpr_c_data_len) + * BIT 96-97: reserved + * BIT 98-101: GPR_C ID (mi->gpr_c_id) + * BIT 102: GPR_D Data Control (mi->gpr_d_ctrl) + * BIT 103-107:GPR_D MDID.ID (mi->gpr_d_data_mdid) + * BIT 108-111:GPR_D MDID.START (mi->gpr_d_data_start) + * BIT 112-116:GPR_D MDID.LEN (mi->gpr_d_data_len) + * BIT 117-118:reserved + * BIT 119-122:GPR_D ID (mi->gpr_d_id) + * BIT 123-186:Flags (mi->flags) + * BIT 187-191:rserved + */ +static void _metainit_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + struct ice_metainit_item *mi = (struct ice_metainit_item *)item; + u8 *buf = (u8 *)data; + u64 d64; + + mi->idx = idx; + d64 = *(u64 *)buf; + + mi->tsr = (u8)(d64 & 0xff); + mi->ho = (u16)((d64 >> 8) & 0x1ff); + mi->pc = (u16)((d64 >> 17) & 0xff); + mi->pg_rn = (u16)((d64 >> 25) & 0x3ff); + mi->cd = (u16)((d64 >> 36) & 0x7); + mi->gpr_a_ctrl = ((d64 >> 39) & 0x1) != 0; + mi->gpr_a_data_mdid = (u8)((d64 >> 40) & 0x1f); + mi->gpr_a_data_start = (u8)((d64 >> 45) & 0xf); + mi->gpr_a_data_len = (u8)((d64 >> 49) & 0x1f); + mi->gpr_a_id = (u8)((d64 >> 56) & 0xf); + + d64 = *(u64 *)&buf[7] >> 4; + mi->gpr_b_ctrl = (d64 & 0x1) != 0; + mi->gpr_b_data_mdid = (u8)((d64 >> 1) & 0x1f); + mi->gpr_b_data_start = (u8)((d64 >> 6) & 0xf); + mi->gpr_b_data_len = (u8)((d64 >> 10) & 0x1f); + mi->gpr_b_id = (u8)((d64 >> 17) & 0xf); + + mi->gpr_c_ctrl = ((d64 >> 21) & 0x1) != 0; + mi->gpr_c_data_mdid = (u8)((d64 >> 22) & 0x1f); + mi->gpr_c_data_start = (u8)((d64 >> 27) & 0xf); + mi->gpr_c_data_len = (u8)((d64 >> 31) & 0x1f); + mi->gpr_c_id = (u8)((d64 >> 38) & 0xf); + + mi->gpr_d_ctrl = ((d64 >> 42) & 0x1) != 0; + mi->gpr_d_data_mdid = (u8)((d64 >> 43) & 0x1f); + mi->gpr_d_data_start = (u8)((d64 >> 48) & 0xf); + mi->gpr_d_data_len = (u8)((d64 >> 52) & 0x1f); + + d64 = *(u64 *)&buf[14] >> 7; + mi->gpr_d_id = (u8)(d64 & 0xf); + + d64 = *(u64 *)&buf[15] >> 3; + mi->flags = d64; + + d64 = ((*(u64 *)&buf[16] >> 56) & 0x7); + mi->flags |= (d64 << 61); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_metainit_dump(hw, mi); +} + +/** + * ice_metainit_table_get - create a metainit table + * @hw: pointer to the hardware structure + */ +struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw) +{ + return (struct ice_metainit_item *) + ice_parser_create_table(hw, ICE_SID_RXPARSER_METADATA_INIT, + sizeof(struct ice_metainit_item), + ICE_METAINIT_TABLE_SIZE, + ice_parser_sect_item_get, + _metainit_parse_item, false); +} diff --git a/drivers/thirdparty/ice/ice_metainit.h b/drivers/thirdparty/ice/ice_metainit.h new file mode 100644 index 000000000000..cfd2e84aad86 --- /dev/null +++ b/drivers/thirdparty/ice/ice_metainit.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _ICE_METAINIT_H_ +#define _ICE_METAINIT_H_ + +struct ice_metainit_item { + u16 idx; + + u8 tsr; + u16 ho; + u16 pc; + u16 pg_rn; + u8 cd; + + bool gpr_a_ctrl; + u8 gpr_a_data_mdid; + u8 gpr_a_data_start; + u8 gpr_a_data_len; + u8 gpr_a_id; + + bool gpr_b_ctrl; + u8 gpr_b_data_mdid; + u8 gpr_b_data_start; + u8 gpr_b_data_len; + u8 gpr_b_id; + + bool gpr_c_ctrl; + u8 gpr_c_data_mdid; + u8 gpr_c_data_start; + u8 gpr_c_data_len; + u8 gpr_c_id; + + bool gpr_d_ctrl; + u8 gpr_d_data_mdid; + u8 gpr_d_data_start; + u8 gpr_d_data_len; + u8 gpr_d_id; + + u64 flags; +}; + +void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item); +struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw); +#endif /*_ICE_METAINIT_H_ */ diff --git a/drivers/thirdparty/ice/ice_migration.c b/drivers/thirdparty/ice/ice_migration.c new file mode 100644 index 000000000000..3d4b87193e08 --- /dev/null +++ b/drivers/thirdparty/ice/ice_migration.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#include "ice.h" + +/** + * ice_migration_get_vf - Get ice vf structure pointer by pdev + * @vf_pdev: pointer to ice vfio pci vf pdev structure + * + * Return nonzero for success, NULL for failure. + */ +void *ice_migration_get_vf(struct pci_dev *vf_pdev) +{ + struct pci_dev *pf_pdev = vf_pdev->physfn; + int vf_id = pci_iov_vf_id(vf_pdev); + struct ice_pf *pf; + + if (!pf_pdev || vf_id < 0) + return NULL; + + pf = pci_get_drvdata(pf_pdev); + return ice_get_vf_by_id(pf, vf_id); +} +EXPORT_SYMBOL(ice_migration_get_vf); + diff --git a/drivers/thirdparty/ice/ice_migration.h b/drivers/thirdparty/ice/ice_migration.h new file mode 100644 index 000000000000..0adaff41e149 --- /dev/null +++ b/drivers/thirdparty/ice/ice_migration.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_MIGRATION_H_ +#define _ICE_MIGRATION_H_ + +#include "kcompat.h" + +#if IS_ENABLED(CONFIG_VFIO_PCI_CORE) && defined(HAVE_LMV1_SUPPORT) +void *ice_migration_get_vf(struct pci_dev *vf_pdev); +#else +static inline void *ice_migration_get_vf(struct pci_dev *vf_pdev) +{ + return NULL; +} +#endif /* CONFIG_VFIO_PCI_CORE && HAVE_LMV1_SUPPORT */ + +#endif /* _ICE_MIGRATION_H_ */ diff --git a/drivers/thirdparty/ice/ice_mk_grp.c b/drivers/thirdparty/ice/ice_mk_grp.c new file mode 100644 index 000000000000..cb70524eb62f --- /dev/null +++ b/drivers/thirdparty/ice/ice_mk_grp.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#include "ice_common.h" +#include "ice_parser_util.h" + +#define ICE_MK_GRP_TABLE_SIZE 128 +#define ICE_MK_COUNT_PER_GRP 8 + +/** + * ice_mk_grp_dump - dump an marker group item info + * @hw: pointer to the hardware structure + * @item: marker group item to dump + */ +void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item) +{ + int i; + + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); + dev_info(ice_hw_to_dev(hw), "markers: "); + for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++) + dev_info(ice_hw_to_dev(hw), "%d ", item->markers[i]); + dev_info(ice_hw_to_dev(hw), "\n"); +} + +static void _mk_grp_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + struct ice_mk_grp_item *grp = (struct ice_mk_grp_item *)item; + u8 *buf = (u8 *)data; + int i; + + grp->idx = idx; + + for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++) + grp->markers[i] = buf[i]; + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_mk_grp_dump(hw, grp); +} + +/** + * ice_mk_grp_table_get - create a marker group table + * @hw: pointer to the hardware structure + */ +struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw) +{ + return (struct ice_mk_grp_item *) + ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_GRP, + sizeof(struct ice_mk_grp_item), + ICE_MK_GRP_TABLE_SIZE, + ice_parser_sect_item_get, + _mk_grp_parse_item, false); +} diff --git a/drivers/thirdparty/ice/ice_mk_grp.h b/drivers/thirdparty/ice/ice_mk_grp.h new file mode 100644 index 000000000000..26f23764a033 --- /dev/null +++ b/drivers/thirdparty/ice/ice_mk_grp.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_MK_GRP_H_ +#define _ICE_MK_GRP_H_ + +struct ice_mk_grp_item { + int idx; + u8 markers[8]; +}; + +void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item); +struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw); +#endif /* _ICE_MK_GRP_H_ */ diff --git a/drivers/thirdparty/ice/ice_nvm.c b/drivers/thirdparty/ice/ice_nvm.c index 51ebb17dac85..6b3bc5f452f3 100644 --- a/drivers/thirdparty/ice/ice_nvm.c +++ b/drivers/thirdparty/ice/ice_nvm.c @@ -3,6 +3,7 @@ #include "ice_common.h" +#define GL_MNG_DEF_DEVID 0x000B611C /** * ice_aq_read_nvm @@ -17,7 +18,7 @@ * * Read the NVM using the admin queue commands (0x0701) */ -static enum ice_status +int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, bool read_shadow_ram, struct ice_sq_cd *cd) @@ -28,7 +29,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, cmd = &desc.params.nvm; if (offset > ICE_AQC_NVM_MAX_OFFSET) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); @@ -61,21 +62,21 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, * Returns a status code on failure. Note that the data pointer may be * partially updated if some reads succeed before a failure. 
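 *
 * Illustrative sketch (names are hypothetical; it assumes the caller has
 * already taken the NVM ownership via ice_acquire_nvm() and allocated a
 * buffer "buf" of hw->flash.flash_size bytes):
 *
 *	u32 len = hw->flash.flash_size;
 *	int err = ice_read_flat_nvm(hw, 0, &len, buf, false);
 *
 * On return, len reflects how many bytes were actually copied into buf.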
*/ -enum ice_status +int ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram) { - enum ice_status status; u32 inlen = *length; u32 bytes_read = 0; bool last_cmd; + int status; *length = 0; /* Verify the length of the read if this is for the Shadow RAM */ if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) { ice_debug(hw, ICE_DBG_NVM, "NVM error: requested data is beyond Shadow RAM limit\n"); - return ICE_ERR_PARAM; + return -EINVAL; } do { @@ -124,7 +125,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, * * Update the NVM using the admin queue commands (0x0703) */ -enum ice_status +int ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, u8 command_flags, struct ice_sq_cd *cd) @@ -136,7 +137,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write); @@ -163,12 +164,12 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, * * Erase the NVM sector using the admin queue commands (0x0702) */ -enum ice_status +int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; - enum ice_status status; + int status; __le16 len; /* read a length value from SR, so module_typeid is equal to 0 */ @@ -191,7 +192,6 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } - /** * ice_read_sr_word_aq - Reads Shadow RAM via AQ * @hw: pointer to the HW structure @@ -200,12 +200,12 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) * * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm. */ -static enum ice_status +static int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) { u32 bytes = sizeof(u16); - enum ice_status status; __le16 data_local; + int status; /* Note that ice_read_flat_nvm checks if the read is past the Shadow * RAM size, and ensures we don't read across a Shadow RAM sector @@ -220,7 +220,6 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) return 0; } - /** * ice_acquire_nvm - Generic request for acquiring the NVM ownership * @hw: pointer to the HW structure @@ -228,7 +227,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) * * This function will request NVM ownership. */ -enum ice_status +int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) { if (hw->flash.blank_nvm_mode) @@ -336,18 +335,18 @@ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select ban * hw->flash.banks data being setup by ice_determine_active_flash_banks() * during initialization. */ -static enum ice_status +static int ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module, u32 offset, u8 *data, u32 length) { - enum ice_status status; + int status; u32 start; start = ice_get_flash_bank_offset(hw, bank, module); if (!start) { ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n", module); - return ICE_ERR_PARAM; + return -EINVAL; } status = ice_acquire_nvm(hw, ICE_RES_READ); @@ -371,11 +370,11 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module, * Read the specified word from the active NVM module. 
This includes the CSS * header at the start of the NVM module. */ -static enum ice_status +static int ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - enum ice_status status; __le16 data_local; + int status; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16), (__force u8 *)&data_local, sizeof(u16)); @@ -385,6 +384,42 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1 return status; } +/** + * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header + * @hw: pointer to the HW struct + * @bank: whether to read from the active or inactive flash bank + * @hdr_len: storage for header length in words + * + * Read the CSS header length from the NVM CSS header and add the Authentication + * header size, and then convert to words. + */ +static int +ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank, + u32 *hdr_len) +{ + u16 hdr_len_l, hdr_len_h; + u32 hdr_len_dword; + int status; + + status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L, + &hdr_len_l); + if (status) + return status; + + status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H, + &hdr_len_h); + if (status) + return status; + + /* CSS header length is in DWORD, so convert to words and add + * authentication header size + */ + hdr_len_dword = hdr_len_h << 16 | hdr_len_l; + *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN; + + return 0; +} + /** * ice_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank * @hw: pointer to the HW structure @@ -395,10 +430,19 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1 * Read the specified word from the copy of the Shadow RAM found in the * specified NVM module. */ -static enum ice_status +static int ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data); + u32 hdr_len; + int status; + + status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len); + if (status) + return status; + + hdr_len = roundup(hdr_len, 32); + + return ice_read_nvm_module(hw, bank, hdr_len + offset, data); } /** @@ -412,11 +456,11 @@ ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u * Note that unlike the NVM module, the CSS data is stored at the end of the * module instead of at the beginning. */ -static enum ice_status +static int ice_read_orom_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - enum ice_status status; __le16 data_local; + int status; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, offset * sizeof(u16), (__force u8 *)&data_local, sizeof(u16)); @@ -435,11 +479,11 @@ ice_read_orom_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u * * Read a word from the specified netlist bank. */ -static enum ice_status +static int ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - enum ice_status status; __le16 data_local; + int status; status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16), (__force u8 *)&data_local, sizeof(u16)); @@ -457,9 +501,9 @@ ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset * * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. 
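 * The NVM ownership is taken with ice_acquire_nvm(hw, ICE_RES_READ) for the
 * duration of the read and released before returning, so callers do not need
 * to hold it themselves.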
*/ -enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) +int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) { - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (!status) { @@ -481,13 +525,13 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) * Area (PFA) and returns the TLV pointer and length. The caller can * use these to read the variable length TLV value. */ -enum ice_status +int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { - enum ice_status status; u16 pfa_len, pfa_ptr; u16 next_tlv; + int status; status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); if (status) { @@ -525,7 +569,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, *module_tlv_len = tlv_len; return 0; } - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } /* Check next TLV, i.e. current TLV pointer + length + 2 words * (for current TLV's type and length) @@ -533,7 +577,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, next_tlv = next_tlv + tlv_len + 2; } /* Module does not exist */ - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -544,12 +588,12 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, * * Reads the part number string from the NVM. */ -enum ice_status +int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) { u16 pba_tlv, pba_tlv_len; - enum ice_status status; u16 pba_word, pba_size; + int status; u16 i; status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, @@ -568,7 +612,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) if (pba_tlv_len < pba_size) { ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n"); - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } /* Subtract one to get PBA word count (PBA Size word is included in @@ -577,7 +621,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n"); - return ICE_ERR_PARAM; + return -EINVAL; } for (i = 0; i < pba_size; i++) { @@ -604,10 +648,10 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) * Read the security revision out of the CSS header of the active NVM module * bank. */ -static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev) +static int ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev) { - enum ice_status status; u16 srev_l, srev_h; + int status; status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_SREV_L, &srev_l); if (status) @@ -631,11 +675,11 @@ static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select * Read the NVM EETRACK ID and map version of the main NVM image bank, filling * in the nvm info structure. */ -static enum ice_status +static int ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm) { u16 eetrack_lo, eetrack_hi, ver; - enum ice_status status; + int status; status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver); if (status) { @@ -675,7 +719,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv * inactive NVM bank. Used to access version data for a pending update that * has not yet been activated. 
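 *
 * Minimal usage sketch (variable names are illustrative):
 *
 *	struct ice_nvm_info pending;
 *	int err = ice_get_inactive_nvm_ver(hw, &pending);
 *
 * On success, "pending" describes the staged, not-yet-activated NVM image.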
*/ -enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm) +int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm) { return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm); } @@ -689,24 +733,28 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info * Read the security revision out of the CSS header of the active OROM module * bank. */ -static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev) +static int ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev) { - enum ice_status status; + u32 orom_size_word = hw->flash.banks.orom_size / 2; u16 srev_l, srev_h; u32 css_start; + u32 hdr_len; + int status; - if (hw->flash.banks.orom_size < ICE_NVM_OROM_TRAILER_LENGTH) { + status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len); + if (status) + return status; + + if (orom_size_word < hdr_len) { ice_debug(hw, ICE_DBG_NVM, "Unexpected Option ROM Size of %u\n", hw->flash.banks.orom_size); - return ICE_ERR_CFG; + return -EIO; } /* calculate how far into the Option ROM the CSS header starts. Note - * that ice_read_orom_module takes a word offset so we need to - * divide by 2 here. + * that ice_read_orom_module takes a word offset */ - css_start = (hw->flash.banks.orom_size - ICE_NVM_OROM_TRAILER_LENGTH) / 2; - + css_start = orom_size_word - hdr_len; status = ice_read_orom_module(hw, bank, css_start + ICE_NVM_CSS_SREV_L, &srev_l); if (status) return status; @@ -729,12 +777,11 @@ static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select * Searches through the Option ROM flash contents to locate the CIVD data for * the image. */ -static enum ice_status +static int ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_civd_info *civd) { struct ice_orom_civd_info tmp; - enum ice_status status; u32 offset; /* The CIVD section is located in the Option ROM aligned to 512 bytes. @@ -744,6 +791,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, */ for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) { u8 sum = 0, i; + int status; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, offset, (u8 *)&tmp, sizeof(tmp)); @@ -758,20 +806,22 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, /* Verify that the simple checksum is zero */ for (i = 0; i < sizeof(tmp); i++) +#ifdef __CHECKER__ /* cppcheck-suppress objectIndex */ +#endif /* __CHECKER__ */ sum += ((u8 *)&tmp)[i]; if (sum) { ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n", sum); - return ICE_ERR_NVM; + return -EIO; } *civd = tmp; return 0; } - return ICE_ERR_NVM; + return -EIO; } /** @@ -783,12 +833,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, * Read Option ROM version and security revision from the Option ROM flash * section. */ -static enum ice_status +static int ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom) { struct ice_orom_civd_info civd; - enum ice_status status; u32 combo_ver; + int status; status = ice_get_orom_civd_data(hw, bank, &civd); if (status) { @@ -820,7 +870,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o * section of flash. Used to access version data for a pending update that has * not yet been activated. 
*/ -enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom) +int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom) { return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom); } @@ -835,13 +885,13 @@ enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_inf * Topology section to find the Netlist ID block and extract the relevant * information into the netlist version structure. */ -static enum ice_status +static int ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_netlist_info *netlist) { u16 module_id, length, node_count, i; - enum ice_status status; u16 *id_blk; + int status; status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id); if (status) @@ -850,7 +900,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) { ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n", ICE_NETLIST_LINK_TOPO_MOD_ID, module_id); - return ICE_ERR_NVM; + return -EIO; } status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length); @@ -861,7 +911,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, if (length < ICE_NETLIST_ID_BLK_SIZE) { ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n", ICE_NETLIST_ID_BLK_SIZE, length); - return ICE_ERR_NVM; + return -EIO; } status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count); @@ -872,7 +922,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, id_blk = devm_kcalloc(ice_hw_to_dev(hw), ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL); if (!id_blk) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Read out the entire Netlist ID Block at once. */ status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, @@ -903,7 +953,6 @@ exit_error: return status; } - /** * ice_get_inactive_netlist_ver * @hw: pointer to the HW struct @@ -913,7 +962,7 @@ exit_error: * extract version data of a pending flash update in order to display the * version data. */ -enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist) +int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist) { return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist); } @@ -926,10 +975,10 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli * the actual size is smaller. Use bisection to determine the accessible size * of flash memory. */ -static enum ice_status ice_discover_flash_size(struct ice_hw *hw) +static int ice_discover_flash_size(struct ice_hw *hw) { u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1; - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) @@ -941,7 +990,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw) u8 data; status = ice_read_flat_nvm(hw, offset, &len, &data, false); - if (status == ICE_ERR_AQ_ERROR && + if (status == -EIO && hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n", __func__, offset); @@ -981,10 +1030,10 @@ err_read_flat_nvm: * sector size by using the highest bit. The reported pointer value will be in * bytes, intended for flat NVM reads. 
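 *
 * Worked example, assuming the usual encoding (high bit set selects 4KB
 * sector units, otherwise the value is a word offset): a raw word of 0x8001
 * reports 1 * 4096 = 4096 bytes, while 0x0040 reports 0x40 * 2 = 128 bytes.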
*/ -static enum ice_status +static int ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) { - enum ice_status status; + int status; u16 value; status = ice_read_sr_word(hw, offset, &value); @@ -1013,10 +1062,10 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) * Each area size word is specified in 4KB sector units. This function reports * the size in bytes, intended for flat NVM reads. */ -static enum ice_status +static int ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) { - enum ice_status status; + int status; u16 value; status = ice_read_sr_word(hw, offset, &value); @@ -1039,12 +1088,12 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) * structure for later use in order to calculate the correct offset to read * from the active module. */ -static enum ice_status +static int ice_determine_active_flash_banks(struct ice_hw *hw) { struct ice_bank_info *banks = &hw->flash.banks; - enum ice_status status; u16 ctrl_word; + int status; status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word); if (status) { @@ -1055,7 +1104,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw) /* Check that the control word indicates validity */ if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) { ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n"); - return ICE_ERR_CFG; + return -EIO; } if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK)) @@ -1119,12 +1168,12 @@ ice_determine_active_flash_banks(struct ice_hw *hw) * This function reads and populates NVM settings such as Shadow RAM size, * max_timeout, and blank_nvm_mode */ -enum ice_status ice_init_nvm(struct ice_hw *hw) +int ice_init_nvm(struct ice_hw *hw) { struct ice_flash_info *flash = &hw->flash; - enum ice_status status; u32 fla, gens_stat; u8 sr_size; + int status; /* The SR size is stored regardless of the NVM programming mode * as the blank mode may be used in the factory line. 
@@ -1143,7 +1192,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) /* Blank programming mode */ flash->blank_nvm_mode = true; ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n"); - return ICE_ERR_NVM_BLANK_MODE; + return -EIO; } status = ice_discover_flash_size(hw); @@ -1175,18 +1224,17 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) return 0; } - /** * ice_nvm_validate_checksum * @hw: pointer to the HW struct * * Verify NVM PFA checksum validity (0x0706) */ -enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) +int ice_nvm_validate_checksum(struct ice_hw *hw) { struct ice_aqc_nvm_checksum *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) @@ -1203,7 +1251,7 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) if (!status) if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) - status = ICE_ERR_NVM_CHECKSUM; + status = -EIO; return status; } @@ -1214,11 +1262,11 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) * * Recalculate NVM PFA checksum (0x0706) */ -enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw) +int ice_nvm_recalculate_checksum(struct ice_hw *hw) { struct ice_aqc_nvm_checksum *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) @@ -1239,22 +1287,41 @@ enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw) /** * ice_nvm_write_activate * @hw: pointer to the HW struct - * @cmd_flags: NVM activate admin command bits (banks to be validated) + * @cmd_flags: flags for write activate command + * @response_flags: response indicators from firmware * * Update the control word with the required banks' validity bits * and dumps the Shadow RAM to flash (0x0707) + * + * cmd_flags controls which banks to activate, the preservation level to use + * when activating the NVM bank, and whether an EMP reset is required for + * activation. + * + * Note that the 16bit cmd_flags value is split between two separate 1 byte + * flag values in the descriptor. + * + * On successful return of the firmware command, the response_flags variable + * is updated with the flags reported by firmware indicating certain status, + * such as whether EMP reset is enabled. */ -enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) +int +ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags) { struct ice_aqc_nvm *cmd; struct ice_aq_desc desc; + int status; cmd = &desc.params.nvm; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate); - cmd->cmd_flags = cmd_flags; + cmd->cmd_flags = ICE_LO_BYTE(cmd_flags); + cmd->offset_high = ICE_HI_BYTE(cmd_flags); - return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status && response_flags) + *response_flags = cmd->cmd_flags; + + return status; } /** @@ -1265,11 +1332,11 @@ enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) * Read the Minimum Security Revision TLV and extract the revision values from * the flash image into a readable structure for processing. 
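 *
 * Callers should check minsrevs->nvm_valid and minsrevs->orom_valid before
 * trusting the corresponding revision values, as either entry may not be
 * populated in the flash image.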
*/ -enum ice_status +int ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs) { struct ice_aqc_nvm_minsrev data; - enum ice_status status; + int status; u16 valid; status = ice_acquire_nvm(hw, ICE_RES_READ); @@ -1321,15 +1388,15 @@ ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs) * fields to determine what update is being requested. If the valid bit is not * set for that module, then the associated minsrev will be left as is. */ -enum ice_status +int ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs) { struct ice_aqc_nvm_minsrev data; - enum ice_status status; + int status; if (!minsrevs->nvm_valid && !minsrevs->orom_valid) { ice_debug(hw, ICE_DBG_NVM, "At least one of NVM and OROM MinSrev must be valid"); - return ICE_ERR_PARAM; + return -EINVAL; } status = ice_acquire_nvm(hw, ICE_RES_WRITE); @@ -1356,12 +1423,12 @@ ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs) /* Update flash data */ status = ice_aq_update_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data), &data, - true, ICE_AQC_NVM_SPECIAL_UPDATE, NULL); + false, ICE_AQC_NVM_SPECIAL_UPDATE, NULL); if (status) goto exit_release_res; /* Dump the Shadow RAM to the flash */ - status = ice_nvm_write_activate(hw, 0); + status = ice_nvm_write_activate(hw, 0, NULL); exit_release_res: ice_release_nvm(hw); @@ -1377,7 +1444,7 @@ exit_release_res: * Fill in the data section of the NVM access request with a copy of the NVM * features structure. */ -static enum ice_status +static int ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data) { @@ -1387,7 +1454,7 @@ ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd, * work on older drivers. */ if (cmd->data_size < sizeof(struct ice_nvm_features)) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Initialize the data buffer to zeros */ memset(data, 0, cmd->data_size); @@ -1443,7 +1510,7 @@ static u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd) * register offset. First validates that the module and flags are correct, and * then ensures that the register offset is one of the accepted registers. */ -static enum ice_status +static int ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd) { u32 module, flags, offset; @@ -1457,7 +1524,7 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd) if (module != ICE_NVM_REG_RW_MODULE || flags != ICE_NVM_REG_RW_FLAGS || cmd->data_size != sizeof_field(union ice_nvm_access_data, regval)) - return ICE_ERR_PARAM; + return -EINVAL; switch (offset) { case GL_HICR: @@ -1467,6 +1534,7 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd) case GLGEN_CSR_DEBUG_C: case GLGEN_RSTAT: case GLPCI_LBARCTRL: + case GL_MNG_DEF_DEVID: case GLNVM_GENS: case GLNVM_FLA: case PF_FUNC_RID: @@ -1475,16 +1543,16 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd) break; } - for (i = 0; i <= ICE_NVM_ACCESS_GL_HIDA_MAX; i++) + for (i = 0; i <= GL_HIDA_MAX_INDEX; i++) if (offset == (u32)GL_HIDA(i)) return 0; - for (i = 0; i <= ICE_NVM_ACCESS_GL_HIBA_MAX; i++) + for (i = 0; i <= GL_HIBA_MAX_INDEX; i++) if (offset == (u32)GL_HIBA(i)) return 0; /* All other register offsets are not valid */ - return ICE_ERR_OUT_OF_RANGE; + return -EIO; } /** @@ -1495,11 +1563,11 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd) * * Process an NVM access request to read a register. 
*/ -static enum ice_status +static int ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data) { - enum ice_status status; + int status; /* Always initialize the output data, even on failure */ memset(data, 0, cmd->data_size); @@ -1526,11 +1594,11 @@ ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, * * Process an NVM access request to write a register. */ -static enum ice_status +static int ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data) { - enum ice_status status; + int status; /* Make sure this is a valid read/write access request */ status = ice_validate_nvm_rw_reg(cmd); @@ -1541,7 +1609,7 @@ ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, switch (cmd->offset) { case GL_HICR_EN: case GLGEN_RSTAT: - return ICE_ERR_OUT_OF_RANGE; + return -EIO; default: break; } @@ -1568,7 +1636,7 @@ ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, * For valid commands, perform the necessary function, copying the data into * the provided data buffer. */ -enum ice_status +int ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data) { @@ -1576,12 +1644,12 @@ ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, /* Extended flags are currently reserved and must be zero */ if ((cmd->config & ICE_NVM_CFG_EXT_FLAGS_M) != 0) - return ICE_ERR_PARAM; + return -EINVAL; /* Adapter info must match the HW device ID */ adapter_info = ice_nvm_access_get_adapter(cmd); if (adapter_info != hw->device_id) - return ICE_ERR_PARAM; + return -EINVAL; switch (cmd->command) { case ICE_NVM_CMD_READ: @@ -1601,7 +1669,7 @@ ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, case ICE_NVM_CMD_WRITE: return ice_nvm_access_write(hw, cmd, data); default: - return ICE_ERR_PARAM; + return -EINVAL; } } @@ -1612,7 +1680,7 @@ ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, * Update empr (0x0709). This command allows SW to * request an EMPR to activate new FW. */ -enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw) +int ice_aq_nvm_update_empr(struct ice_hw *hw) { struct ice_aq_desc desc; @@ -1635,7 +1703,7 @@ enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw) * as part of the NVM update as the first cmd in the flow. */ -enum ice_status +int ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, u16 length, struct ice_sq_cd *cd) { @@ -1643,7 +1711,7 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, struct ice_aq_desc desc; if (length != 0 && !data) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.pkg_data; @@ -1672,17 +1740,17 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, * the TransferFlag is set to End or StartAndEnd. 
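 * The data, comp_response and comp_response_code pointers are all required;
 * the call fails with -EINVAL if any of them is NULL.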
*/ -enum ice_status +int ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, u8 transfer_flag, u8 *comp_response, u8 *comp_response_code, struct ice_sq_cd *cd) { struct ice_aqc_nvm_pass_comp_tbl *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!data || !comp_response || !comp_response_code) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.pass_comp_tbl; diff --git a/drivers/thirdparty/ice/ice_nvm.h b/drivers/thirdparty/ice/ice_nvm.h index c37481f62786..78151348a119 100644 --- a/drivers/thirdparty/ice/ice_nvm.h +++ b/drivers/thirdparty/ice/ice_nvm.h @@ -63,59 +63,51 @@ union ice_nvm_access_data { struct ice_nvm_features drv_features; /* NVM features */ }; -/* NVM Access registers */ -#define GL_HIDA(_i) (0x00082000 + ((_i) * 4)) -#define GL_HIBA(_i) (0x00081000 + ((_i) * 4)) -#define GL_HICR 0x00082040 -#define GL_HICR_EN 0x00082044 -#define GLGEN_CSR_DEBUG_C 0x00075750 -#define GLPCI_LBARCTRL 0x0009DE74 -#define GLNVM_GENS 0x000B6100 -#define GLNVM_FLA 0x000B6108 - -#define ICE_NVM_ACCESS_GL_HIDA_MAX 15 -#define ICE_NVM_ACCESS_GL_HIBA_MAX 1023 - -enum ice_status +int ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data); -enum ice_status +int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_nvm(struct ice_hw *hw); -enum ice_status +int +ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, + void *data, bool last_command, bool read_shadow_ram, + struct ice_sq_cd *cd); +int ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram); -enum ice_status +int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type); -enum ice_status +int ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs); -enum ice_status +int ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs); -enum ice_status +int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom); -enum ice_status +int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm); -enum ice_status +int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist); -enum ice_status +int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); -enum ice_status ice_init_nvm(struct ice_hw *hw); -enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); -enum ice_status +int ice_init_nvm(struct ice_hw *hw); +int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); +int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, u8 command_flags, struct ice_sq_cd *cd); -enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); -enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw); -enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags); -enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw); -enum ice_status +int ice_nvm_validate_checksum(struct ice_hw *hw); +int ice_nvm_recalculate_checksum(struct ice_hw *hw); +int +ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags); +int ice_aq_nvm_update_empr(struct ice_hw *hw); +int ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, u16 length, struct ice_sq_cd *cd); -enum ice_status +int 
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, u8 transfer_flag, u8 *comp_response, u8 *comp_response_code, struct ice_sq_cd *cd); diff --git a/drivers/thirdparty/ice/ice_osdep.h b/drivers/thirdparty/ice/ice_osdep.h index 3dacbda6597b..41763568ad1f 100644 --- a/drivers/thirdparty/ice/ice_osdep.h +++ b/drivers/thirdparty/ice/ice_osdep.h @@ -5,9 +5,14 @@ #define _ICE_OSDEP_H_ #include +#include +#include #include #include +#include +#include #include +#include #include "kcompat.h" #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) @@ -25,8 +30,8 @@ struct ice_dma_mem { size_t size; }; -#define ice_hw_to_dev(ptr) \ - (&(container_of((ptr), struct ice_pf, hw))->pdev->dev) +struct ice_hw; +struct device *ice_hw_to_dev(struct ice_hw *hw); #define ice_info_fwlog(hw, rowsize, groupsize, buf, len) \ print_hex_dump(KERN_INFO, " FWLOG: ", \ @@ -34,12 +39,57 @@ struct ice_dma_mem { rowsize, groupsize, buf, \ len, false) +#ifdef CONFIG_SYMBOLIC_ERRNAME +/** + * ice_print_errno - logs message with appended error + * @func: logging function (such as dev_err, netdev_warn, etc.) + * @obj: first argument that func takes + * @code: standard error code (negative integer) + * @fmt: format string (without "\n" in the end) + * + * Uses kernel logging function of your choice to log provided message + * with error code and (if allowed by kernel) its symbolic + * representation apended. All additional format arguments can be + * added at the end. + * Supports only functions that take an additional + * argument before formatted string. + */ +#define ice_print_errno(func, obj, code, fmt, args...) ({ \ + long code_ = (code); \ + BUILD_BUG_ON(fmt[strlen(fmt) - 1] == '\n'); \ + func(obj, fmt ", error: %ld (%pe)\n", \ + ##args, code_, ERR_PTR(code_)); \ +}) +/** + * ice_err_arg - replaces error code as a logging function argument + * @err: standard error code (negative integer) + */ +#define ice_err_arg(err) ERR_PTR(err) +/** + * ice_err_format - replaces %(l)d format corresponding to an error code + */ +#define ice_err_format() "%pe" +#else +#define ice_print_errno(func, obj, code, fmt, args...) ({ \ + BUILD_BUG_ON(fmt[strlen(fmt) - 1] == '\n'); \ + func(obj, fmt ", error: %ld\n", ##args, (long)code); \ +}) +#define ice_err_arg(err) ((long)err) +#define ice_err_format() "%ld" +#endif /* CONFIG_SYMBOLIC_ERRNAME */ +#define ice_dev_err_errno(dev, code, fmt, args...) \ + ice_print_errno(dev_err, dev, code, fmt, ##args) +#define ice_dev_warn_errno(dev, code, fmt, args...) \ + ice_print_errno(dev_warn, dev, code, fmt, ##args) +#define ice_dev_info_errno(dev, code, fmt, args...) \ + ice_print_errno(dev_info, dev, code, fmt, ##args) +#define ice_dev_dbg_errno(dev, code, fmt, args...) \ + ice_print_errno(dev_dbg, dev, code, fmt, ##args) #ifdef CONFIG_DYNAMIC_DEBUG #define ice_debug(hw, type, fmt, args...) \ dev_dbg(ice_hw_to_dev(hw), fmt, ##args) - #define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ print_hex_dump_debug(KBUILD_MODNAME " ", \ DUMP_PREFIX_OFFSET, rowsize, \ diff --git a/drivers/thirdparty/ice/ice_parser.c b/drivers/thirdparty/ice/ice_parser.c new file mode 100644 index 000000000000..f02218a892ec --- /dev/null +++ b/drivers/thirdparty/ice/ice_parser.c @@ -0,0 +1,595 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#include "ice_common.h" +#include "ice_parser_util.h" + +#define ICE_SEC_DATA_OFFSET 4 +#define ICE_SID_RXPARSER_IMEM_ENTRY_SIZE 48 +#define ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE 24 +#define ICE_SID_RXPARSER_CAM_ENTRY_SIZE 16 +#define ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE 17 +#define ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE 12 +#define ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE 13 +#define ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE 88 +#define ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE 24 +#define ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE 8 +#define ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE 24 +#define ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE 1 + +#define ICE_SEC_LBL_DATA_OFFSET 2 +#define ICE_SID_LBL_ENTRY_SIZE 66 + +void ice_lbl_dump(struct ice_hw *hw, struct ice_lbl_item *item) +{ + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); + dev_info(ice_hw_to_dev(hw), "label = %s\n", item->label); +} + +void ice_parse_item_dflt(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + memcpy(item, data, size); +} + +/** + * ice_parser_sect_item_get - parse a item from a section + * @sect_type: section type + * @section: section object + * @index: index of the item to get + * @offset: dummy as prototype of ice_pkg_enum_entry's last parameter + */ +void *ice_parser_sect_item_get(u32 sect_type, void *section, + u32 index, u32 *offset) +{ + struct ice_pkg_sect_hdr *hdr; + int data_off = ICE_SEC_DATA_OFFSET; + int size; + + if (!section) + return NULL; + + switch (sect_type) { + case ICE_SID_RXPARSER_IMEM: + size = ICE_SID_RXPARSER_IMEM_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_METADATA_INIT: + size = ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_CAM: + size = ICE_SID_RXPARSER_CAM_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_PG_SPILL: + size = ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_NOMATCH_CAM: + size = ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_NOMATCH_SPILL: + size = ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_BOOST_TCAM: + size = ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE; + break; + case ICE_SID_LBL_RXPARSER_TMEM: + data_off = ICE_SEC_LBL_DATA_OFFSET; + size = ICE_SID_LBL_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_MARKER_PTYPE: + size = ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_MARKER_GRP: + size = ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_PROTO_GRP: + size = ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_FLAG_REDIR: + size = ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE; + break; + default: + return NULL; + } + + hdr = (struct ice_pkg_sect_hdr *)section; + if (index >= le16_to_cpu(hdr->count)) + return NULL; + + return (void *)((u64)section + data_off + index * size); +} + +/** + * ice_parser_create_table - create a item table from a section + * @hw: pointer to the hardware structure + * @sect_type: section type + * @item_size: item size in byte + * @length: number of items in the table to create + * @item_get: the function will be parsed to ice_pkg_enum_entry + * @parse_item: the function to parse the item + * @no_offset: ignore header offset, calculate index from 0 + */ +void *ice_parser_create_table(struct ice_hw *hw, u32 sect_type, + u32 item_size, u32 length, + void *(*item_get)(u32 sect_type, void *section, + u32 index, u32 *offset), + void (*parse_item)(struct ice_hw *hw, u16 idx, + void *item, void *data, + int size), + bool no_offset) +{ + struct ice_seg *seg = hw->seg; + 
struct ice_pkg_enum state; + u16 idx = 0xffff; + void *table; + void *data; + + if (!seg) + return NULL; + + table = devm_kzalloc(ice_hw_to_dev(hw), item_size * length, + GFP_KERNEL); + if (!table) { + ice_debug(hw, ICE_DBG_PARSER, "failed to allocate memory for table type %d.\n", + sect_type); + return NULL; + } + + memset(&state, 0, sizeof(state)); + do { + data = ice_pkg_enum_entry(seg, &state, sect_type, NULL, + item_get); + seg = NULL; + if (data) { + struct ice_pkg_sect_hdr *hdr = + (struct ice_pkg_sect_hdr *)state.sect; + + if (no_offset) + idx++; + else + idx = le16_to_cpu(hdr->offset) + + state.entry_idx; + parse_item(hw, idx, + (void *)((u64)table + idx * item_size), + data, item_size); + } + } while (data); + + return table; +} + +/** + * ice_parser_create - create a parser instance + * @hw: pointer to the hardware structure + * @psr: output parameter for a new parser instance be created + */ +int ice_parser_create(struct ice_hw *hw, struct ice_parser **psr) +{ + struct ice_parser *p; + int status; + + p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(struct ice_parser), + GFP_KERNEL); + if (!p) + return -ENOMEM; + + p->hw = hw; + p->rt.psr = p; + + p->imem_table = ice_imem_table_get(hw); + if (!p->imem_table) { + status = -EINVAL; + goto err; + } + + p->mi_table = ice_metainit_table_get(hw); + if (!p->mi_table) { + status = -EINVAL; + goto err; + } + + p->pg_cam_table = ice_pg_cam_table_get(hw); + if (!p->pg_cam_table) { + status = -EINVAL; + goto err; + } + + p->pg_sp_cam_table = ice_pg_sp_cam_table_get(hw); + if (!p->pg_sp_cam_table) { + status = -EINVAL; + goto err; + } + + p->pg_nm_cam_table = ice_pg_nm_cam_table_get(hw); + if (!p->pg_nm_cam_table) { + status = -EINVAL; + goto err; + } + + p->pg_nm_sp_cam_table = ice_pg_nm_sp_cam_table_get(hw); + if (!p->pg_nm_sp_cam_table) { + status = -EINVAL; + goto err; + } + + p->bst_tcam_table = ice_bst_tcam_table_get(hw); + if (!p->bst_tcam_table) { + status = -EINVAL; + goto err; + } + + p->bst_lbl_table = ice_bst_lbl_table_get(hw); + if (!p->bst_lbl_table) { + status = -EINVAL; + goto err; + } + + p->ptype_mk_tcam_table = ice_ptype_mk_tcam_table_get(hw); + if (!p->ptype_mk_tcam_table) { + status = -EINVAL; + goto err; + } + + p->mk_grp_table = ice_mk_grp_table_get(hw); + if (!p->mk_grp_table) { + status = -EINVAL; + goto err; + } + + p->proto_grp_table = ice_proto_grp_table_get(hw); + if (!p->proto_grp_table) { + status = -EINVAL; + goto err; + } + + p->flg_rd_table = ice_flg_rd_table_get(hw); + if (!p->flg_rd_table) { + status = -EINVAL; + goto err; + } + + p->xlt_kb_sw = ice_xlt_kb_get_sw(hw); + if (!p->xlt_kb_sw) { + status = -EINVAL; + goto err; + } + + p->xlt_kb_acl = ice_xlt_kb_get_acl(hw); + if (!p->xlt_kb_acl) { + status = -EINVAL; + goto err; + } + + p->xlt_kb_fd = ice_xlt_kb_get_fd(hw); + if (!p->xlt_kb_fd) { + status = -EINVAL; + goto err; + } + + p->xlt_kb_rss = ice_xlt_kb_get_rss(hw); + if (!p->xlt_kb_rss) { + status = -EINVAL; + goto err; + } + + *psr = p; + return 0; +err: + ice_parser_destroy(p); + return status; +} + +/** + * ice_parser_destroy - destroy a parser instance + * @psr: pointer to a parser instance + */ +void ice_parser_destroy(struct ice_parser *psr) +{ + devm_kfree(ice_hw_to_dev(psr->hw), psr->imem_table); + devm_kfree(ice_hw_to_dev(psr->hw), psr->mi_table); + devm_kfree(ice_hw_to_dev(psr->hw), psr->pg_cam_table); + devm_kfree(ice_hw_to_dev(psr->hw), psr->pg_sp_cam_table); + devm_kfree(ice_hw_to_dev(psr->hw), psr->pg_nm_cam_table); + devm_kfree(ice_hw_to_dev(psr->hw), psr->pg_nm_sp_cam_table); + 
devm_kfree(ice_hw_to_dev(psr->hw), psr->bst_tcam_table); + devm_kfree(ice_hw_to_dev(psr->hw), psr->bst_lbl_table); + devm_kfree(ice_hw_to_dev(psr->hw), psr->ptype_mk_tcam_table); + devm_kfree(ice_hw_to_dev(psr->hw), psr->mk_grp_table); + devm_kfree(ice_hw_to_dev(psr->hw), psr->proto_grp_table); + devm_kfree(ice_hw_to_dev(psr->hw), psr->flg_rd_table); + devm_kfree(ice_hw_to_dev(psr->hw), psr->xlt_kb_sw); + devm_kfree(ice_hw_to_dev(psr->hw), psr->xlt_kb_acl); + devm_kfree(ice_hw_to_dev(psr->hw), psr->xlt_kb_fd); + devm_kfree(ice_hw_to_dev(psr->hw), psr->xlt_kb_rss); + + devm_kfree(ice_hw_to_dev(psr->hw), psr); +} + +/** + * ice_parser_run - parse a packet in binary format and return the result + * @psr: pointer to a parser instance + * @pkt_buf: packet data + * @pkt_len: packet length + * @rslt: input/output parameter to save the parser result + */ +int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf, + int pkt_len, struct ice_parser_result *rslt) +{ + ice_parser_rt_reset(&psr->rt); + ice_parser_rt_pktbuf_set(&psr->rt, pkt_buf, pkt_len); + + return ice_parser_rt_execute(&psr->rt, rslt); +} + +/** + * ice_parser_result_dump - dump parser result info + * @hw: pointer to the hardware structure + * @rslt: parser result info to dump + */ +void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt) +{ + int i; + + dev_info(ice_hw_to_dev(hw), "ptype = %d\n", rslt->ptype); + for (i = 0; i < rslt->po_num; i++) + dev_info(ice_hw_to_dev(hw), "proto = %d, offset = %d\n", + rslt->po[i].proto_id, rslt->po[i].offset); + + dev_info(ice_hw_to_dev(hw), "flags_psr = 0x%016llx\n", + (unsigned long long)rslt->flags_psr); + dev_info(ice_hw_to_dev(hw), "flags_pkt = 0x%016llx\n", + (unsigned long long)rslt->flags_pkt); + dev_info(ice_hw_to_dev(hw), "flags_sw = 0x%04x\n", rslt->flags_sw); + dev_info(ice_hw_to_dev(hw), "flags_fd = 0x%04x\n", rslt->flags_fd); + dev_info(ice_hw_to_dev(hw), "flags_rss = 0x%04x\n", rslt->flags_rss); +} + +static void _bst_vm_set(struct ice_parser *psr, const char *prefix, bool on) +{ + u16 i = 0; + + while (true) { + struct ice_bst_tcam_item *item; + item = ice_bst_tcam_search(psr->bst_tcam_table, + psr->bst_lbl_table, + prefix, &i); + if (!item) + break; + item->key[0] = (u8)(on ? 0xff : 0xfe); + item->key_inv[0] = (u8)(on ?
0xff : 0xfe); + i++; + } +} + +/** + * ice_parser_dvm_set - configure double vlan mode for parser + * @psr: pointer to a parser instance + * @on: true to turn on; false to turn off + */ +void ice_parser_dvm_set(struct ice_parser *psr, bool on) +{ + _bst_vm_set(psr, "BOOST_MAC_VLAN_DVM", on); + _bst_vm_set(psr, "BOOST_MAC_VLAN_SVM", !on); +} + +static int +_tunnel_port_set(struct ice_parser *psr, const char *prefix, u16 udp_port, + bool on) +{ + u8 *buf = (u8 *)&udp_port; + u16 i = 0; + + while (true) { + struct ice_bst_tcam_item *item; + item = ice_bst_tcam_search(psr->bst_tcam_table, + psr->bst_lbl_table, + prefix, &i); + if (!item) + break; + + /* found empty slot to add */ + if (on && item->key[16] == 0xfe && item->key_inv[16] == 0xfe) { + item->key_inv[15] = buf[0]; + item->key_inv[16] = buf[1]; + item->key[15] = (u8)(0xff - buf[0]); + item->key[16] = (u8)(0xff - buf[1]); + + return 0; + /* found a matched slot to delete */ + } else if (!on && (item->key_inv[15] == buf[0] || + item->key_inv[16] == buf[1])) { + item->key_inv[15] = 0xff; + item->key_inv[16] = 0xfe; + item->key[15] = 0xff; + item->key[16] = 0xfe; + + return 0; + } + i++; + } + + return -EINVAL; +} + +/** + * ice_parser_vxlan_tunnel_set - configure vxlan tunnel for parser + * @psr: pointer to a parser instance + * @udp_port: vxlan tunnel port in UDP header + * @on: true to turn on; false to turn off + */ +int ice_parser_vxlan_tunnel_set(struct ice_parser *psr, + u16 udp_port, bool on) +{ + return _tunnel_port_set(psr, "TNL_VXLAN", udp_port, on); +} + +/** + * ice_parser_geneve_tunnel_set - configure geneve tunnel for parser + * @psr: pointer to a parser instance + * @udp_port: geneve tunnel port in UDP header + * @on: true to turn on; false to turn off + */ +int ice_parser_geneve_tunnel_set(struct ice_parser *psr, + u16 udp_port, bool on) +{ + return _tunnel_port_set(psr, "TNL_GENEVE", udp_port, on); +} + +/** + * ice_parser_ecpri_tunnel_set - configure ecpri tunnel for parser + * @psr: pointer to a parser instance + * @udp_port: ecpri tunnel port in UDP header + * @on: true to turn on; false to turn off + */ +int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, + u16 udp_port, bool on) +{ + return _tunnel_port_set(psr, "TNL_UDP_ECPRI", udp_port, on); +} + +static bool _nearest_proto_id(struct ice_parser_result *rslt, u16 offset, + u8 *proto_id, u16 *proto_off) +{ + u16 dist = 0xffff; + u8 p = 0; + int i; + + for (i = 0; i < rslt->po_num; i++) { + if (offset < rslt->po[i].offset) + continue; + if (offset - rslt->po[i].offset < dist) { + p = rslt->po[i].proto_id; + dist = offset - rslt->po[i].offset; + } + } + + if (dist % 2) + return false; + + *proto_id = p; + *proto_off = dist; + + return true; +} + +/** default flag mask to cover GTP_EH_PDU, GTP_EH_PDU_LINK and TUN2 + * In future, the flag masks should learn from DDP + */ +#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW 0x4002 +#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL 0x0000 +#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD 0x6080 +#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS 0x6010 + +/** + * ice_parser_profile_init - initialize a FXP profile base on parser result + * @rslt: a instance of a parser result + * @pkt_buf: packet data buffer + * @msk_buf: packet mask buffer + * @buf_len: packet length + * @blk: FXP pipeline stage + * @prefix_match: match protocol stack exactly or only prefix + * @prof: input/output parameter to save the profile + */ +int ice_parser_profile_init(struct ice_parser_result *rslt, + const u8 *pkt_buf, const u8 *msk_buf, + int buf_len, enum ice_block blk, 
+ bool prefix_match, + struct ice_parser_profile *prof) +{ + u8 proto_id = 0xff; + u16 proto_off = 0; + u16 off; + + memset(prof, 0, sizeof(*prof)); + set_bit(rslt->ptype, prof->ptypes); + if (blk == ICE_BLK_SW) { + prof->flags = rslt->flags_sw; + prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW; + } else if (blk == ICE_BLK_ACL) { + prof->flags = rslt->flags_acl; + prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL; + } else if (blk == ICE_BLK_FD) { + prof->flags = rslt->flags_fd; + prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD; + } else if (blk == ICE_BLK_RSS) { + prof->flags = rslt->flags_rss; + prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS; + } else { + return -EINVAL; + } + + for (off = 0; off < buf_len - 1; off++) { + if (msk_buf[off] == 0 && msk_buf[off + 1] == 0) + continue; + if (!_nearest_proto_id(rslt, off, &proto_id, &proto_off)) + continue; + if (prof->fv_num >= 32) + return -EINVAL; + + prof->fv[prof->fv_num].proto_id = proto_id; + prof->fv[prof->fv_num].offset = proto_off; + prof->fv[prof->fv_num].spec = *(const u16 *)&pkt_buf[off]; + prof->fv[prof->fv_num].msk = *(const u16 *)&msk_buf[off]; + prof->fv_num++; + } + + return 0; +} + +/** + * ice_parser_profile_dump - dump an FXP profile info + * @hw: pointer to the hardware structure + * @prof: profile info to dump + */ +void ice_parser_profile_dump(struct ice_hw *hw, struct ice_parser_profile *prof) +{ + u16 i; + + dev_info(ice_hw_to_dev(hw), "ptypes:\n"); + for (i = 0; i < ICE_FLOW_PTYPE_MAX; i++) + if (test_bit(i, prof->ptypes)) + dev_info(ice_hw_to_dev(hw), "\t%d\n", i); + + for (i = 0; i < prof->fv_num; i++) + dev_info(ice_hw_to_dev(hw), + "proto = %d, offset = %d spec = 0x%04x, mask = 0x%04x\n", + prof->fv[i].proto_id, prof->fv[i].offset, + prof->fv[i].spec, prof->fv[i].msk); + + dev_info(ice_hw_to_dev(hw), "flags = 0x%04x\n", prof->flags); + dev_info(ice_hw_to_dev(hw), "flags_msk = 0x%04x\n", prof->flags_msk); +} + +/** + * ice_check_ddp_support_proto_id - check DDP package file support protocol ID + * @hw: pointer to the HW struct + * @proto_id: protocol ID value + * + * This function maintains the compatibility of the program process by checking + * whether the current DDP file supports the required protocol ID. + */ +bool ice_check_ddp_support_proto_id(struct ice_hw *hw, + enum ice_prot_id proto_id) +{ + struct ice_proto_grp_item *proto_grp_table; + struct ice_proto_grp_item *proto_grp; + bool exist = false; + u16 idx, i; + + proto_grp_table = ice_proto_grp_table_get(hw); + if (!proto_grp_table) + return false; + + for (idx = 0; idx < ICE_PROTO_GRP_TABLE_SIZE; idx++) { + proto_grp = &proto_grp_table[idx]; + for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) { + if (proto_grp->po[i].proto_id == proto_id) { + exist = true; + goto exit; + } + } + } + +exit: + devm_kfree(ice_hw_to_dev(hw), proto_grp_table); + return exist; +} diff --git a/drivers/thirdparty/ice/ice_parser.h b/drivers/thirdparty/ice/ice_parser.h new file mode 100644 index 000000000000..250e4f3a8441 --- /dev/null +++ b/drivers/thirdparty/ice/ice_parser.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _ICE_PARSER_H_ +#define _ICE_PARSER_H_ + +#include "ice_metainit.h" +#include "ice_imem.h" +#include "ice_pg_cam.h" +#include "ice_bst_tcam.h" +#include "ice_ptype_mk.h" +#include "ice_mk_grp.h" +#include "ice_proto_grp.h" +#include "ice_flg_rd.h" +#include "ice_xlt_kb.h" +#include "ice_parser_rt.h" +#include "ice_tmatch.h" + +struct ice_parser { + struct ice_hw *hw; /* pointer to the hardware structure */ + + /* load data from section ICE_SID_RX_PARSER_IMEM */ + struct ice_imem_item *imem_table; + /* load data from section ICE_SID_RXPARSER_METADATA_INIT */ + struct ice_metainit_item *mi_table; + /* load data from section ICE_SID_RXPARSER_CAM */ + struct ice_pg_cam_item *pg_cam_table; + /* load data from section ICE_SID_RXPARSER_PG_SPILL */ + struct ice_pg_cam_item *pg_sp_cam_table; + /* load data from section ICE_SID_RXPARSER_NOMATCH_CAM */ + struct ice_pg_nm_cam_item *pg_nm_cam_table; + /* load data from section ICE_SID_RXPARSER_NOMATCH_SPILL */ + struct ice_pg_nm_cam_item *pg_nm_sp_cam_table; + /* load data from section ICE_SID_RXPARSER_BOOST_TCAM */ + struct ice_bst_tcam_item *bst_tcam_table; + /* load data from section ICE_SID_LBL_RXPARSER_TMEM */ + struct ice_lbl_item *bst_lbl_table; + /* load data from section ICE_SID_RXPARSER_MARKER_PTYPE */ + struct ice_ptype_mk_tcam_item *ptype_mk_tcam_table; + /* load data from section ICE_SID_RXPARSER_MARKER_GRP */ + struct ice_mk_grp_item *mk_grp_table; + /* load data from section ICE_SID_RXPARSER_PROTO_GRP */ + struct ice_proto_grp_item *proto_grp_table; + /* load data from section ICE_SID_RXPARSER_FLAG_REDIR */ + struct ice_flg_rd_item *flg_rd_table; + /* load data from section ICE_SID_XLT_KEY_BUILDER_SW */ + struct ice_xlt_kb *xlt_kb_sw; + /* load data from section ICE_SID_XLT_KEY_BUILDER_ACL */ + struct ice_xlt_kb *xlt_kb_acl; + /* load data from section ICE_SID_XLT_KEY_BUILDER_FD */ + struct ice_xlt_kb *xlt_kb_fd; + /* load data from section ICE_SID_XLT_KEY_BUILDER_RSS */ + struct ice_xlt_kb *xlt_kb_rss; + struct ice_parser_rt rt; /* parser runtime */ +}; + +int ice_parser_create(struct ice_hw *hw, struct ice_parser **psr); +void ice_parser_destroy(struct ice_parser *psr); +void ice_parser_dvm_set(struct ice_parser *psr, bool on); +int ice_parser_vxlan_tunnel_set(struct ice_parser *psr, + u16 udp_port, bool on); +int ice_parser_geneve_tunnel_set(struct ice_parser *psr, + u16 udp_port, bool on); +int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, + u16 udp_port, bool on); + +struct ice_parser_proto_off { + u8 proto_id; /* hardware protocol ID */ + u16 offset; /* offset where the protocol header start */ +}; + +struct ice_parser_result { + u16 ptype; /* 16 bits hardware PTYPE */ + /* protocol and header offset pairs */ + struct ice_parser_proto_off po[16]; + int po_num; /* number of pairs must <= 16 */ + u64 flags_psr; /* 64 bits parser flags */ + u64 flags_pkt; /* 64 bits packet flags */ + u16 flags_sw; /* 16 bits key builder flag for SW */ + u16 flags_acl; /* 16 bits key builder flag for ACL */ + u16 flags_fd; /* 16 bits key builder flag for FD */ + u16 flags_rss; /* 16 bits key builder flag for RSS */ +}; + +int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf, + int pkt_len, struct ice_parser_result *rslt); +void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt); + +struct ice_parser_fv { + u8 proto_id; /* hardware protocol ID */ + u16 offset; /* offset from the start of the protocol header */ + u16 spec; /* 16 bits pattern to match */ + u16 msk; /* 16 bits pattern mask */ +}; + +struct 
ice_parser_profile { + struct ice_parser_fv fv[48]; /* field vector array */ + int fv_num; /* number of field vectors, must be <= 48 */ + u16 flags; /* 16 bits key builder flag */ + u16 flags_msk; /* key builder flag mask */ + /* 1024 bits PTYPE bitmap */ + DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); +}; + +int ice_parser_profile_init(struct ice_parser_result *rslt, + const u8 *pkt_buf, const u8 *msk_buf, + int buf_len, enum ice_block blk, + bool prefix_match, + struct ice_parser_profile *prof); +void ice_parser_profile_dump(struct ice_hw *hw, + struct ice_parser_profile *prof); +bool ice_check_ddp_support_proto_id(struct ice_hw *hw, + enum ice_prot_id proto_id); +#endif /* _ICE_PARSER_H_ */ diff --git a/drivers/thirdparty/ice/ice_parser_rt.c b/drivers/thirdparty/ice/ice_parser_rt.c new file mode 100644 index 000000000000..9a417f5cefc5 --- /dev/null +++ b/drivers/thirdparty/ice/ice_parser_rt.c @@ -0,0 +1,864 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#include "ice_common.h" + +#define GPR_HB_IDX 64 +#define GPR_ERR_IDX 84 +#define GPR_FLG_IDX 104 +#define GPR_TSR_IDX 108 +#define GPR_NN_IDX 109 +#define GPR_HO_IDX 110 +#define GPR_NP_IDX 111 + +static void _rt_tsr_set(struct ice_parser_rt *rt, u16 tsr) +{ + rt->gpr[GPR_TSR_IDX] = tsr; +} + +static void _rt_ho_set(struct ice_parser_rt *rt, u16 ho) +{ + rt->gpr[GPR_HO_IDX] = ho; + memcpy(&rt->gpr[GPR_HB_IDX], &rt->pkt_buf[ho], 32); +} + +static void _rt_np_set(struct ice_parser_rt *rt, u16 pc) +{ + rt->gpr[GPR_NP_IDX] = pc; +} + +static void _rt_nn_set(struct ice_parser_rt *rt, u16 node) +{ + rt->gpr[GPR_NN_IDX] = node; +} + +static void _rt_flag_set(struct ice_parser_rt *rt, int idx, bool val) +{ + int y = idx / 16; + int x = idx % 16; + + if (val) + rt->gpr[GPR_FLG_IDX + y] |= (u16)(1 << x); + else + rt->gpr[GPR_FLG_IDX + y] &= ~(u16)(1 << x); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set parser flag %d value %d\n", + idx, val); +} + +static void _rt_gpr_set(struct ice_parser_rt *rt, int idx, u16 val) +{ + if (idx == GPR_HO_IDX) + _rt_ho_set(rt, val); + else + rt->gpr[idx] = val; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set GPR %d value %d\n", + idx, val); +} + +static void _rt_err_set(struct ice_parser_rt *rt, int idx, bool val) +{ + if (val) + rt->gpr[GPR_ERR_IDX] |= (u16)(1 << idx); + else + rt->gpr[GPR_ERR_IDX] &= ~(u16)(1 << idx); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set parser error %d value %d\n", + idx, val); +} + +/** + * ice_parser_rt_reset - reset the parser runtime + * @rt: pointer to the parser runtime + */ +void ice_parser_rt_reset(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + struct ice_metainit_item *mi = &psr->mi_table[0]; + int i; + + memset(rt, 0, sizeof(*rt)); + + _rt_tsr_set(rt, mi->tsr); + _rt_ho_set(rt, mi->ho); + _rt_np_set(rt, mi->pc); + _rt_nn_set(rt, mi->pg_rn); + + rt->psr = psr; + + for (i = 0; i < 64; i++) { + if ((mi->flags & (1ul << i)) != 0ul) + _rt_flag_set(rt, i, true); + } +} + +/** + * ice_parser_rt_pktbuf_set - set a packet into parser runtime + * @rt: pointer to the parser runtime + * @pkt_buf: buffer with packet data + * @pkt_len: packet buffer length + */ +void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf, + int pkt_len) +{ + int len = min(ICE_PARSER_MAX_PKT_LEN, pkt_len); + u16 ho = rt->gpr[GPR_HO_IDX]; + + memcpy(rt->pkt_buf, pkt_buf, len); + rt->pkt_len = pkt_len; + + memcpy(&rt->gpr[GPR_HB_IDX], &rt->pkt_buf[ho], 32); +} + +static void _bst_key_init(struct ice_parser_rt *rt, struct
ice_imem_item *imem) +{ + u8 tsr = (u8)rt->gpr[GPR_TSR_IDX]; + u16 ho = rt->gpr[GPR_HO_IDX]; + u8 *key = rt->bst_key; + int i; + + if (imem->b_kb.tsr_ctrl) + key[19] = (u8)tsr; + else + key[19] = imem->b_kb.priority; + + for (i = 18; i >= 0; i--) { + int j = ho + 18 - i; + + if (j < ICE_PARSER_MAX_PKT_LEN) + key[i] = rt->pkt_buf[j]; + else + key[i] = 0; + } + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n"); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + key[0], key[1], key[2], key[3], key[4], + key[5], key[6], key[7], key[8], key[9], + key[10], key[11], key[12], key[13], key[14], + key[15], key[16], key[17], key[18], key[19]); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n"); +} + +static u8 _bit_rev_u8(u8 v) +{ + u8 r = 0; + int i; + + for (i = 0; i < 8; i++) { + r |= (u8)((v & 0x1) << (7 - i)); + v >>= 1; + } + + return r; +} + +static u16 _bit_rev_u16(u16 v, int len) +{ + u16 r = 0; + int i; + + for (i = 0; i < len; i++) { + r |= (u16)((v & 0x1) << (len - 1 - i)); + v >>= 1; + } + + return r; +} + +static u32 _bit_rev_u32(u32 v, int len) +{ + u32 r = 0; + int i; + + for (i = 0; i < len; i++) { + r |= (u32)((v & 0x1) << (len - 1 - i)); + v >>= 1; + } + + return r; +} + +static u32 _hv_bit_sel(struct ice_parser_rt *rt, int start, int len) +{ + u64 d64, msk; + u8 b[8]; + int i; + + int offset = GPR_HB_IDX + start / 16; + + memcpy(b, &rt->gpr[offset], 8); + + for (i = 0; i < 8; i++) + b[i] = _bit_rev_u8(b[i]); + + d64 = *(u64 *)b; + msk = (1ul << len) - 1; + + return _bit_rev_u32((u32)((d64 >> (start % 16)) & msk), len); +} + +static u32 _pk_build(struct ice_parser_rt *rt, struct ice_np_keybuilder *kb) +{ + if (kb->ops == 0) + return _hv_bit_sel(rt, kb->start_or_reg0, kb->len_or_reg1); + else if (kb->ops == 1) + return rt->gpr[kb->start_or_reg0] | + ((u32)rt->gpr[kb->len_or_reg1] << 16); + else if (kb->ops == 2) + return 0; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported ops %d\n", kb->ops); + return 0xffffffff; +} + +static bool _flag_get(struct ice_parser_rt *rt, int index) +{ + int y = index / 16; + int x = index % 16; + + return (rt->gpr[GPR_FLG_IDX + y] & (u16)(1 << x)) != 0; +} + +static void _imem_pgk_init(struct ice_parser_rt *rt, struct ice_imem_item *imem) +{ + memset(&rt->pg_key, 0, sizeof(rt->pg_key)); + rt->pg_key.next_proto = _pk_build(rt, &imem->np_kb); + + if (imem->pg_kb.flag0_ena) + rt->pg_key.flag0 = _flag_get(rt, imem->pg_kb.flag0_idx); + if (imem->pg_kb.flag1_ena) + rt->pg_key.flag1 = _flag_get(rt, imem->pg_kb.flag1_idx); + if (imem->pg_kb.flag2_ena) + rt->pg_key.flag2 = _flag_get(rt, imem->pg_kb.flag2_idx); + if (imem->pg_kb.flag3_ena) + rt->pg_key.flag3 = _flag_get(rt, imem->pg_kb.flag3_idx); + + rt->pg_key.alu_reg = rt->gpr[imem->pg_kb.alu_reg_idx]; + rt->pg_key.node_id = rt->gpr[GPR_NN_IDX]; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d),flag0(%d), flag1(%d), flag2(%d), flag3(%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n", + rt->pg_key.node_id, + rt->pg_key.flag0, + rt->pg_key.flag1, + rt->pg_key.flag2, + rt->pg_key.flag3, + rt->pg_key.boost_idx, + rt->pg_key.alu_reg, + rt->pg_key.next_proto); +} + +static void _imem_alu0_set(struct ice_parser_rt *rt, struct ice_imem_item *imem) +{ + rt->alu0 = &imem->alu0; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from imem pc %d\n", + imem->idx); +} + +static void _imem_alu1_set(struct ice_parser_rt *rt, struct ice_imem_item *imem) +{
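+ /* stage ALU1 from IMEM; ice_parser_rt_execute() may replace it with the Boost TCAM ALU1 on a TCAM hit */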
+ rt->alu1 = &imem->alu1; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from imem pc %d\n", + imem->idx); +} + +static void _imem_alu2_set(struct ice_parser_rt *rt, struct ice_imem_item *imem) +{ + rt->alu2 = &imem->alu2; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from imem pc %d\n", + imem->idx); +} + +static void _imem_pgp_set(struct ice_parser_rt *rt, struct ice_imem_item *imem) +{ + rt->pg = imem->pg; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from imem pc %d\n", + rt->pg, imem->idx); +} + +static void +_bst_pgk_init(struct ice_parser_rt *rt, struct ice_bst_tcam_item *bst) +{ + memset(&rt->pg_key, 0, sizeof(rt->pg_key)); + rt->pg_key.boost_idx = bst->hit_idx_grp; + rt->pg_key.next_proto = _pk_build(rt, &bst->np_kb); + + if (bst->pg_kb.flag0_ena) + rt->pg_key.flag0 = _flag_get(rt, bst->pg_kb.flag0_idx); + if (bst->pg_kb.flag1_ena) + rt->pg_key.flag1 = _flag_get(rt, bst->pg_kb.flag1_idx); + if (bst->pg_kb.flag2_ena) + rt->pg_key.flag2 = _flag_get(rt, bst->pg_kb.flag2_idx); + if (bst->pg_kb.flag3_ena) + rt->pg_key.flag3 = _flag_get(rt, bst->pg_kb.flag3_idx); + + rt->pg_key.alu_reg = rt->gpr[bst->pg_kb.alu_reg_idx]; + rt->pg_key.node_id = rt->gpr[GPR_NN_IDX]; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d),flag0(%d), flag1(%d), flag2(%d), flag3(%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n", + rt->pg_key.node_id, + rt->pg_key.flag0, + rt->pg_key.flag1, + rt->pg_key.flag2, + rt->pg_key.flag3, + rt->pg_key.boost_idx, + rt->pg_key.alu_reg, + rt->pg_key.next_proto); +} + +static void _bst_alu0_set(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + rt->alu0 = &bst->alu0; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from boost address %d\n", + bst->address); +} + +static void _bst_alu1_set(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + rt->alu1 = &bst->alu1; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from boost address %d\n", + bst->address); +} + +static void _bst_alu2_set(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + rt->alu2 = &bst->alu2; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from boost address %d\n", + bst->address); +} + +static void _bst_pgp_set(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + rt->pg = bst->pg_pri; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from boost address %d\n", + rt->pg, bst->address); +} + +static struct ice_pg_cam_item *__pg_cam_match(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + struct ice_pg_cam_item *item; + + item = ice_pg_cam_match(psr->pg_cam_table, ICE_PG_CAM_TABLE_SIZE, + &rt->pg_key); + if (item) + return item; + + item = ice_pg_cam_match(psr->pg_sp_cam_table, ICE_PG_SP_CAM_TABLE_SIZE, + &rt->pg_key); + return item; +} + +static struct ice_pg_nm_cam_item *__pg_nm_cam_match(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + struct ice_pg_nm_cam_item *item; + + item = ice_pg_nm_cam_match(psr->pg_nm_cam_table, + ICE_PG_NM_CAM_TABLE_SIZE, &rt->pg_key); + + if (item) + return item; + + item = ice_pg_nm_cam_match(psr->pg_nm_sp_cam_table, + ICE_PG_NM_SP_CAM_TABLE_SIZE, + &rt->pg_key); + return item; +} + +static void _gpr_add(struct ice_parser_rt *rt, int idx, u16 val) +{ + rt->pu.gpr_val_upd[idx] = true; + rt->pu.gpr_val[idx] = val; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for register %d value %d\n", + idx, val); +} + +static void _pg_exe(struct ice_parser_rt *rt) +{ + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing 
ParseGraph action ...\n"); + + _gpr_add(rt, GPR_NP_IDX, rt->action->next_pc); + _gpr_add(rt, GPR_NN_IDX, rt->action->next_node); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action done.\n"); +} + +static void _flg_add(struct ice_parser_rt *rt, int idx, bool val) +{ + rt->pu.flg_msk |= (1ul << idx); + if (val) + rt->pu.flg_val |= (1ul << idx); + else + rt->pu.flg_val &= ~(1ul << idx); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for flag %d value %d\n", + idx, val); +} + +static void _flg_update(struct ice_parser_rt *rt, struct ice_alu *alu) +{ + if (alu->dedicate_flags_ena) { + int i; + if (alu->flags_extr_imm) { + for (i = 0; i < alu->dst_len; i++) + _flg_add(rt, alu->dst_start + i, + (alu->flags_start_imm & + (1u << i)) != 0); + } else { + for (i = 0; i < alu->dst_len; i++) { + _flg_add(rt, alu->dst_start + i, + _hv_bit_sel(rt, + alu->flags_start_imm + i, + 1) != 0); + } + } + } +} + +static void _po_update(struct ice_parser_rt *rt, struct ice_alu *alu) +{ + if (alu->proto_offset_opc == 1) + rt->po = (u16)(rt->gpr[GPR_HO_IDX] + alu->proto_offset); + else if (alu->proto_offset_opc == 2) + rt->po = (u16)(rt->gpr[GPR_HO_IDX] - alu->proto_offset); + else if (alu->proto_offset_opc == 0) + rt->po = rt->gpr[GPR_HO_IDX]; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Update Protocol Offset = %d\n", + rt->po); +} + +static u16 _reg_bit_sel(struct ice_parser_rt *rt, int reg_idx, + int start, int len) +{ + u32 d32, msk; + u8 b[4]; + u8 v[4]; + + memcpy(b, &rt->gpr[reg_idx + start / 16], 4); + + v[0] = _bit_rev_u8(b[0]); + v[1] = _bit_rev_u8(b[1]); + v[2] = _bit_rev_u8(b[2]); + v[3] = _bit_rev_u8(b[3]); + + d32 = *(u32 *)v; + msk = (1u << len) - 1; + + return _bit_rev_u16((u16)((d32 >> (start % 16)) & msk), len); +} + +static void _err_add(struct ice_parser_rt *rt, int idx, bool val) +{ + rt->pu.err_msk |= (u16)(1 << idx); + if (val) + rt->pu.flg_val |= (u64)(1 << idx); + else + rt->pu.flg_val &= ~(u64)(1 << idx); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for error %d value %d\n", + idx, val); +} + +static void _dst_reg_bit_set(struct ice_parser_rt *rt, struct ice_alu *alu, + bool val) +{ + u16 flg_idx; + + if (alu->dedicate_flags_ena) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "DedicatedFlagsEnable should not be enabled in opcode %d\n", + alu->opc); + return; + } + + if (alu->dst_reg_id == GPR_ERR_IDX) { + if (alu->dst_start >= 16) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid error %d\n", + alu->dst_start); + return; + } + _err_add(rt, alu->dst_start, val); + } else if (alu->dst_reg_id >= GPR_FLG_IDX) { + flg_idx = (u16)(((alu->dst_reg_id - GPR_FLG_IDX) << 4) + + alu->dst_start); + + if (flg_idx >= 64) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid flag %d\n", + flg_idx); + return; + } + _flg_add(rt, flg_idx, val); + } else { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unexpected Dest Register Bit set, RegisterID %d Start %d\n", + alu->dst_reg_id, alu->dst_start); + } +} + +static void _alu_exe(struct ice_parser_rt *rt, struct ice_alu *alu) +{ + u16 dst, src, shift, imm; + + if (alu->shift_xlate_select) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "shift_xlate_select != 0 is not expected\n"); + return; + } + + _po_update(rt, alu); + _flg_update(rt, alu); + + dst = rt->gpr[alu->dst_reg_id]; + src = _reg_bit_sel(rt, alu->src_reg_id, alu->src_start, alu->src_len); + shift = alu->shift_xlate_key; + imm = alu->imm; + + switch (alu->opc) { + case ICE_ALU_PARK: + break; + case ICE_ALU_MOV_ADD: + dst = (u16)((src << shift) + imm); + _gpr_add(rt, 
alu->dst_reg_id, dst); + break; + case ICE_ALU_ADD: + dst += (u16)((src << shift) + imm); + _gpr_add(rt, alu->dst_reg_id, dst); + break; + case ICE_ALU_ORLT: + if (src < imm) + _dst_reg_bit_set(rt, alu, true); + _gpr_add(rt, GPR_NP_IDX, alu->branch_addr); + break; + case ICE_ALU_OREQ: + if (src == imm) + _dst_reg_bit_set(rt, alu, true); + _gpr_add(rt, GPR_NP_IDX, alu->branch_addr); + break; + case ICE_ALU_SETEQ: + if (src == imm) + _dst_reg_bit_set(rt, alu, true); + else + _dst_reg_bit_set(rt, alu, false); + _gpr_add(rt, GPR_NP_IDX, alu->branch_addr); + break; + case ICE_ALU_MOV_XOR: + dst = (u16)((u16)(src << shift) ^ (u16)imm); + _gpr_add(rt, alu->dst_reg_id, dst); + break; + default: + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported ALU instruction %d\n", + alu->opc); + break; + } +} + +static void _alu0_exe(struct ice_parser_rt *rt) +{ + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 ...\n"); + _alu_exe(rt, rt->alu0); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 done.\n"); +} + +static void _alu1_exe(struct ice_parser_rt *rt) +{ + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 ...\n"); + _alu_exe(rt, rt->alu1); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 done.\n"); +} + +static void _alu2_exe(struct ice_parser_rt *rt) +{ + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 ...\n"); + _alu_exe(rt, rt->alu2); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 done.\n"); +} + +static void _pu_exe(struct ice_parser_rt *rt) +{ + struct ice_gpr_pu *pu = &rt->pu; + int i; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers ...\n"); + + for (i = 0; i < 128; i++) { + if (pu->gpr_val_upd[i]) + _rt_gpr_set(rt, i, pu->gpr_val[i]); + } + + for (i = 0; i < 64; i++) { + if (pu->flg_msk & (1ul << i)) + _rt_flag_set(rt, i, pu->flg_val & (1ul << i)); + } + + for (i = 0; i < 16; i++) { + if (pu->err_msk & (1u << i)) + _rt_err_set(rt, i, pu->err_val & (1u << i)); + } + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers done.\n"); +} + +static void _alu_pg_exe(struct ice_parser_rt *rt) +{ + memset(&rt->pu, 0, sizeof(rt->pu)); + + if (rt->pg == 0) { + _pg_exe(rt); + _alu0_exe(rt); + _alu1_exe(rt); + _alu2_exe(rt); + } else if (rt->pg == 1) { + _alu0_exe(rt); + _pg_exe(rt); + _alu1_exe(rt); + _alu2_exe(rt); + } else if (rt->pg == 2) { + _alu0_exe(rt); + _alu1_exe(rt); + _pg_exe(rt); + _alu2_exe(rt); + } else if (rt->pg == 3) { + _alu0_exe(rt); + _alu1_exe(rt); + _alu2_exe(rt); + _pg_exe(rt); + } + + _pu_exe(rt); + + if (rt->action->ho_inc == 0) + return; + + if (rt->action->ho_polarity) + _rt_ho_set(rt, rt->gpr[GPR_HO_IDX] + rt->action->ho_inc); + else + _rt_ho_set(rt, rt->gpr[GPR_HO_IDX] - rt->action->ho_inc); +} + +static void _proto_off_update(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + + if (rt->action->is_pg) { + struct ice_proto_grp_item *proto_grp = + &psr->proto_grp_table[rt->action->proto_id]; + u16 po; + int i; + + for (i = 0; i < 8; i++) { + struct ice_proto_off *entry = &proto_grp->po[i]; + + if (entry->proto_id == 0xff) + break; + + if (!entry->polarity) + po = (u16)(rt->po + entry->offset); + else + po = (u16)(rt->po - entry->offset); + + rt->protocols[entry->proto_id] = true; + rt->offsets[entry->proto_id] = po; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n", + entry->proto_id, po); + } + } else { + rt->protocols[rt->action->proto_id] = true; + rt->offsets[rt->action->proto_id] = rt->po; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n", +
rt->action->proto_id, rt->po); + } +} + +static void _marker_set(struct ice_parser_rt *rt, int idx) +{ + int x = idx / 8; + int y = idx % 8; + + rt->markers[x] |= (u8)(1u << y); +} + +static void _marker_update(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + + if (rt->action->is_mg) { + struct ice_mk_grp_item *mk_grp = + &psr->mk_grp_table[rt->action->marker_id]; + int i; + + for (i = 0; i < 8; i++) { + u8 marker = mk_grp->markers[i]; + + if (marker == 71) + break; + + _marker_set(rt, marker); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n", + marker); + } + } else { + if (rt->action->marker_id != 71) + _marker_set(rt, rt->action->marker_id); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n", + rt->action->marker_id); + } +} + +static u16 _ptype_resolve(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + struct ice_ptype_mk_tcam_item *item; + + item = ice_ptype_mk_tcam_match(psr->ptype_mk_tcam_table, + rt->markers, 9); + if (item) + return item->ptype; + return 0xffff; +} + +static void _proto_off_resolve(struct ice_parser_rt *rt, + struct ice_parser_result *rslt) +{ + int i; + + for (i = 0; i < 255; i++) { + if (rt->protocols[i]) { + rslt->po[rslt->po_num].proto_id = (u8)i; + rslt->po[rslt->po_num].offset = rt->offsets[i]; + rslt->po_num++; + } + } +} + +static void _result_resolve(struct ice_parser_rt *rt, + struct ice_parser_result *rslt) +{ + struct ice_parser *psr = rt->psr; + + memset(rslt, 0, sizeof(*rslt)); + + rslt->ptype = _ptype_resolve(rt); + + memcpy(&rslt->flags_psr, &rt->gpr[GPR_FLG_IDX], 8); + rslt->flags_pkt = ice_flg_redirect(psr->flg_rd_table, rslt->flags_psr); + rslt->flags_sw = ice_xlt_kb_flag_get(psr->xlt_kb_sw, rslt->flags_pkt); + rslt->flags_fd = ice_xlt_kb_flag_get(psr->xlt_kb_fd, rslt->flags_pkt); + rslt->flags_rss = ice_xlt_kb_flag_get(psr->xlt_kb_rss, rslt->flags_pkt); + + _proto_off_resolve(rt, rslt); +} + +/** + * ice_parser_rt_execute - parser execution routine + * @rt: pointer to the parser runtime + * @rslt: input/output parameter to save parser result + */ +int ice_parser_rt_execute(struct ice_parser_rt *rt, + struct ice_parser_result *rslt) +{ + struct ice_pg_nm_cam_item *pg_nm_cam; + struct ice_parser *psr = rt->psr; + struct ice_pg_cam_item *pg_cam; + int status = 0; + u16 node; + u16 pc; + + node = rt->gpr[GPR_NN_IDX]; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Start with Node: %d\n", node); + + while (true) { + struct ice_bst_tcam_item *bst; + struct ice_imem_item *imem; + + pc = rt->gpr[GPR_NP_IDX]; + imem = &psr->imem_table[pc]; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load imem at pc: %d\n", + pc); + + _bst_key_init(rt, imem); + bst = ice_bst_tcam_match(psr->bst_tcam_table, rt->bst_key); + + if (!bst) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "No Boost TCAM Match\n"); + _imem_pgk_init(rt, imem); + _imem_alu0_set(rt, imem); + _imem_alu1_set(rt, imem); + _imem_alu2_set(rt, imem); + _imem_pgp_set(rt, imem); + } else { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Boost TCAM Match address: %d\n", + bst->address); + if (imem->b_m.pg) { + _bst_pgk_init(rt, bst); + _bst_pgp_set(rt, bst); + } else { + _imem_pgk_init(rt, imem); + _imem_pgp_set(rt, imem); + } + + if (imem->b_m.al0) + _bst_alu0_set(rt, bst); + else + _imem_alu0_set(rt, imem); + + if (imem->b_m.al1) + _bst_alu1_set(rt, bst); + else + _imem_alu1_set(rt, imem); + + if (imem->b_m.al2) + _bst_alu2_set(rt, bst); + else + _imem_alu2_set(rt, imem); + } + + rt->action = NULL; + pg_cam = __pg_cam_match(rt); + if (!pg_cam) { + pg_nm_cam = 
__pg_nm_cam_match(rt); + if (pg_nm_cam) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph Nomatch CAM Address %d\n", + pg_nm_cam->idx); + rt->action = &pg_nm_cam->action; + } + } else { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph CAM Address %d\n", + pg_cam->idx); + rt->action = &pg_cam->action; + } + + if (!rt->action) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Failed to match ParseGraph CAM, stop parsing.\n"); + status = -EINVAL; + break; + } + + _alu_pg_exe(rt); + _marker_update(rt); + _proto_off_update(rt); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Go to node %d\n", + rt->action->next_node); + + if (rt->action->is_last_round) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Last Round in ParseGraph Action, stop parsing.\n"); + break; + } + + if (rt->gpr[GPR_HO_IDX] >= rt->pkt_len) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Header Offset %d is larger than packet len %d, stop parsing\n", + rt->gpr[GPR_HO_IDX], rt->pkt_len); + break; + } + } + + _result_resolve(rt, rslt); + + return status; +} diff --git a/drivers/thirdparty/ice/ice_parser_rt.h b/drivers/thirdparty/ice/ice_parser_rt.h new file mode 100644 index 000000000000..8c2b5d4fbf15 --- /dev/null +++ b/drivers/thirdparty/ice/ice_parser_rt.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_PARSER_RT_H_ +#define _ICE_PARSER_RT_H_ + +struct ice_parser_ctx; + +#define ICE_PARSER_MAX_PKT_LEN 504 +#define ICE_PARSER_GPR_NUM 128 + +struct ice_gpr_pu { + bool gpr_val_upd[128]; /* flag to indicate if GRP needs to be updated */ + u16 gpr_val[128]; + u64 flg_msk; + u64 flg_val; + u16 err_msk; + u16 err_val; +}; + +struct ice_parser_rt { + struct ice_parser *psr; + u16 gpr[ICE_PARSER_GPR_NUM]; + u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + 32]; + u16 pkt_len; + u16 po; + u8 bst_key[20]; + struct ice_pg_cam_key pg_key; + struct ice_alu *alu0; + struct ice_alu *alu1; + struct ice_alu *alu2; + struct ice_pg_cam_action *action; + u8 pg; + struct ice_gpr_pu pu; + u8 markers[9]; /* 8 * 9 = 72 bits*/ + bool protocols[256]; + u16 offsets[256]; +}; + +void ice_parser_rt_reset(struct ice_parser_rt *rt); +void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf, + int pkt_len); + +struct ice_parser_result; +int ice_parser_rt_execute(struct ice_parser_rt *rt, + struct ice_parser_result *rslt); +#endif /* _ICE_PARSER_RT_H_ */ diff --git a/drivers/thirdparty/ice/ice_parser_util.h b/drivers/thirdparty/ice/ice_parser_util.h new file mode 100644 index 000000000000..3f843a03209f --- /dev/null +++ b/drivers/thirdparty/ice/ice_parser_util.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _ICE_PARSER_UTIL_H_ +#define _ICE_PARSER_UTIL_H_ + +#include "ice_imem.h" +#include "ice_metainit.h" + +struct ice_lbl_item { + u16 idx; + char label[64]; +}; + +struct ice_pkg_sect_hdr { + __le16 count; + __le16 offset; +}; + +void ice_lbl_dump(struct ice_hw *hw, struct ice_lbl_item *item); +void ice_parse_item_dflt(struct ice_hw *hw, u16 idx, void *item, + void *data, int size); + +void *ice_parser_sect_item_get(u32 sect_type, void *section, + u32 index, u32 *offset); + +void *ice_parser_create_table(struct ice_hw *hw, u32 sect_type, + u32 item_size, u32 length, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset), + void (*parse_item)(struct ice_hw *hw, u16 idx, + void *item, void *data, + int size), + bool no_offset); +#endif /* _ICE_PARSER_UTIL_H_ */ diff --git a/drivers/thirdparty/ice/ice_pg_cam.c b/drivers/thirdparty/ice/ice_pg_cam.c new file mode 100644 index 000000000000..09b70fcb738d --- /dev/null +++ b/drivers/thirdparty/ice/ice_pg_cam.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#include "ice_common.h" +#include "ice_parser_util.h" + +static void _pg_cam_key_dump(struct ice_hw *hw, struct ice_pg_cam_key *key) +{ + dev_info(ice_hw_to_dev(hw), "key:\n"); + dev_info(ice_hw_to_dev(hw), "\tvalid = %d\n", key->valid); + dev_info(ice_hw_to_dev(hw), "\tnode_id = %d\n", key->node_id); + dev_info(ice_hw_to_dev(hw), "\tflag0 = %d\n", key->flag0); + dev_info(ice_hw_to_dev(hw), "\tflag1 = %d\n", key->flag1); + dev_info(ice_hw_to_dev(hw), "\tflag2 = %d\n", key->flag2); + dev_info(ice_hw_to_dev(hw), "\tflag3 = %d\n", key->flag3); + dev_info(ice_hw_to_dev(hw), "\tboost_idx = %d\n", key->boost_idx); + dev_info(ice_hw_to_dev(hw), "\talu_reg = 0x%04x\n", key->alu_reg); + dev_info(ice_hw_to_dev(hw), "\tnext_proto = 0x%08x\n", + key->next_proto); +} + +static void _pg_nm_cam_key_dump(struct ice_hw *hw, + struct ice_pg_nm_cam_key *key) +{ + dev_info(ice_hw_to_dev(hw), "key:\n"); + dev_info(ice_hw_to_dev(hw), "\tvalid = %d\n", key->valid); + dev_info(ice_hw_to_dev(hw), "\tnode_id = %d\n", key->node_id); + dev_info(ice_hw_to_dev(hw), "\tflag0 = %d\n", key->flag0); + dev_info(ice_hw_to_dev(hw), "\tflag1 = %d\n", key->flag1); + dev_info(ice_hw_to_dev(hw), "\tflag2 = %d\n", key->flag2); + dev_info(ice_hw_to_dev(hw), "\tflag3 = %d\n", key->flag3); + dev_info(ice_hw_to_dev(hw), "\tboost_idx = %d\n", key->boost_idx); + dev_info(ice_hw_to_dev(hw), "\talu_reg = 0x%04x\n", key->alu_reg); +} + +static void _pg_cam_action_dump(struct ice_hw *hw, + struct ice_pg_cam_action *action) +{ + dev_info(ice_hw_to_dev(hw), "action:\n"); + dev_info(ice_hw_to_dev(hw), "\tnext_node = %d\n", action->next_node); + dev_info(ice_hw_to_dev(hw), "\tnext_pc = %d\n", action->next_pc); + dev_info(ice_hw_to_dev(hw), "\tis_pg = %d\n", action->is_pg); + dev_info(ice_hw_to_dev(hw), "\tproto_id = %d\n", action->proto_id); + dev_info(ice_hw_to_dev(hw), "\tis_mg = %d\n", action->is_mg); + dev_info(ice_hw_to_dev(hw), "\tmarker_id = %d\n", action->marker_id); + dev_info(ice_hw_to_dev(hw), "\tis_last_round = %d\n", + action->is_last_round); + dev_info(ice_hw_to_dev(hw), "\tho_polarity = %d\n", + action->ho_polarity); + dev_info(ice_hw_to_dev(hw), "\tho_inc = %d\n", action->ho_inc); +} + +/** + * ice_pg_cam_dump - dump an parse graph cam info + * @hw: pointer to the hardware structure + * @item: parse graph cam to dump + */ +void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item) +{ + dev_info(ice_hw_to_dev(hw), "index = %d\n", 
item->idx); + _pg_cam_key_dump(hw, &item->key); + _pg_cam_action_dump(hw, &item->action); +} + +/** + * ice_pg_nm_cam_dump - dump an parse graph no match cam info + * @hw: pointer to the hardware structure + * @item: parse graph no match cam to dump + */ +void ice_pg_nm_cam_dump(struct ice_hw *hw, struct ice_pg_nm_cam_item *item) +{ + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); + _pg_nm_cam_key_dump(hw, &item->key); + _pg_cam_action_dump(hw, &item->action); +} + +/** The function parses a 55 bits Parse Graph CAM Action with below format: + * BIT 0-11: Next Node ID (action->next_node) + * BIT 12-19: Next PC (action->next_pc) + * BIT 20: Is Protocol Group (action->is_pg) + * BIT 21-23: reserved + * BIT 24-31: Protocol ID (action->proto_id) + * BIT 32: Is Marker Group (action->is_mg) + * BIT 33-40: Marker ID (action->marker_id) + * BIT 41: Is Last Round (action->is_last_round) + * BIT 42: Header Offset Polarity (action->ho_poloarity) + * BIT 43-51: Header Offset Inc (action->ho_inc) + * BIT 52-54: reserved + */ +static void _pg_cam_action_init(struct ice_pg_cam_action *action, u64 data) +{ + action->next_node = (u16)(data & 0x7ff); + action->next_pc = (u8)((data >> 11) & 0xff); + action->is_pg = ((data >> 19) & 0x1) != 0; + action->proto_id = ((data >> 23) & 0xff); + action->is_mg = ((data >> 31) & 0x1) != 0; + action->marker_id = ((data >> 32) & 0xff); + action->is_last_round = ((data >> 40) & 0x1) != 0; + action->ho_polarity = ((data >> 41) & 0x1) != 0; + action->ho_inc = ((data >> 42) & 0x1ff); +} + +/** The function parses a 41 bits Parse Graph NoMatch CAM Key with below format: + * BIT 0: Valid (key->valid) + * BIT 1-11: Node ID (key->node_id) + * BIT 12: Flag 0 (key->flag0) + * BIT 13: Flag 1 (key->flag1) + * BIT 14: Flag 2 (key->flag2) + * BIT 15: Flag 3 (key->flag3) + * BIT 16: Boost Hit (key->boost_idx to 0 if it is 0) + * BIT 17-24: Boost Index (key->boost_idx only if Boost Hit is not 0) + * BIT 25-40: ALU Reg (key->alu_reg) + */ +static void _pg_nm_cam_key_init(struct ice_pg_nm_cam_key *key, u64 data) +{ + key->valid = (data & 0x1) != 0; + key->node_id = (u16)((data >> 1) & 0x7ff); + key->flag0 = ((data >> 12) & 0x1) != 0; + key->flag1 = ((data >> 13) & 0x1) != 0; + key->flag2 = ((data >> 14) & 0x1) != 0; + key->flag3 = ((data >> 15) & 0x1) != 0; + if ((data >> 16) & 0x1) + key->boost_idx = (u8)((data >> 17) & 0xff); + else + key->boost_idx = 0; + key->alu_reg = (u16)((data >> 25) & 0xffff); +} + +/** The function parses a 73 bits Parse Graph CAM Key with below format: + * BIT 0: Valid (key->valid) + * BIT 1-11: Node ID (key->node_id) + * BIT 12: Flag 0 (key->flag0) + * BIT 13: Flag 1 (key->flag1) + * BIT 14: Flag 2 (key->flag2) + * BIT 15: Flag 3 (key->flag3) + * BIT 16: Boost Hit (key->boost_idx to 0 if it is 0) + * BIT 17-24: Boost Index (key->boost_idx only if Boost Hit is not 0) + * BIT 25-40: ALU Reg (key->alu_reg) + * BIT 41-72: Next Proto Key (key->next_proto) + */ +static void _pg_cam_key_init(struct ice_pg_cam_key *key, u8 *data) +{ + u64 d64 = *(u64 *)data; + + key->valid = (d64 & 0x1) != 0; + key->node_id = (u16)((d64 >> 1) & 0x7ff); + key->flag0 = ((d64 >> 12) & 0x1) != 0; + key->flag1 = ((d64 >> 13) & 0x1) != 0; + key->flag2 = ((d64 >> 14) & 0x1) != 0; + key->flag3 = ((d64 >> 15) & 0x1) != 0; + if ((d64 >> 16) & 0x1) + key->boost_idx = (u8)((d64 >> 17) & 0xff); + else + key->boost_idx = 0; + key->alu_reg = (u16)((d64 >> 25) & 0xffff); + + key->next_proto = (*(u32 *)&data[5] >> 1); + key->next_proto |= ((u32)(data[9] & 0x1) << 31); +} + +/** The function 
parses a 128 bits Parse Graph CAM Entry with below format: + * BIT 0-72: Key (ci->key) + * BIT 73-127: Action (ci->action) + */ +static void _pg_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + struct ice_pg_cam_item *ci = (struct ice_pg_cam_item *)item; + u8 *buf = (u8 *)data; + u64 d64; + + ci->idx = idx; + d64 = (*(u64 *)&buf[9] >> 1); + _pg_cam_key_init(&ci->key, buf); + _pg_cam_action_init(&ci->action, d64); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_pg_cam_dump(hw, ci); +} + +/** The function parses a 136 bits Parse Graph Spill CAM Entry with below + * format: + * BIT 0-55: Action (ci->key) + * BIT 56-135: Key (ci->action) + */ +static void _pg_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + struct ice_pg_cam_item *ci = (struct ice_pg_cam_item *)item; + u8 *buf = (u8 *)data; + u64 d64; + + ci->idx = idx; + d64 = *(u64 *)buf; + _pg_cam_action_init(&ci->action, d64); + _pg_cam_key_init(&ci->key, &buf[7]); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_pg_cam_dump(hw, ci); +} + +/** The function parses a 96 bits Parse Graph NoMatch CAM Entry with below + * format: + * BIT 0-40: Key (ci->key) + * BIT 41-95: Action (ci->action) + */ +static void _pg_nm_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + struct ice_pg_nm_cam_item *ci = (struct ice_pg_nm_cam_item *)item; + u8 *buf = (u8 *)data; + u64 d64; + + ci->idx = idx; + d64 = *(u64 *)buf; + _pg_nm_cam_key_init(&ci->key, d64); + d64 = (*(u64 *)&buf[5] >> 1); + _pg_cam_action_init(&ci->action, d64); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_pg_nm_cam_dump(hw, ci); +} + +/** The function parses a 104 bits Parse Graph NoMatch Spill CAM Entry with + * below format: + * BIT 0-55: Key (ci->key) + * BIT 56-103: Action (ci->action) + */ +static void _pg_nm_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + struct ice_pg_nm_cam_item *ci = (struct ice_pg_nm_cam_item *)item; + u8 *buf = (u8 *)data; + u64 d64; + + ci->idx = idx; + d64 = *(u64 *)buf; + _pg_cam_action_init(&ci->action, d64); + d64 = *(u64 *)&buf[7]; + _pg_nm_cam_key_init(&ci->key, d64); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_pg_nm_cam_dump(hw, ci); +} + +/** + * ice_pg_cam_table_get - create a parse graph cam table + * @hw: pointer to the hardware structure + */ +struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw) +{ + return (struct ice_pg_cam_item *) + ice_parser_create_table(hw, ICE_SID_RXPARSER_CAM, + sizeof(struct ice_pg_cam_item), + ICE_PG_CAM_TABLE_SIZE, + ice_parser_sect_item_get, + _pg_cam_parse_item, false); +} + +/** + * ice_pg_sp_cam_table_get - create a parse graph spill cam table + * @hw: pointer to the hardware structure + */ +struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw) +{ + return (struct ice_pg_cam_item *) + ice_parser_create_table(hw, ICE_SID_RXPARSER_PG_SPILL, + sizeof(struct ice_pg_cam_item), + ICE_PG_SP_CAM_TABLE_SIZE, + ice_parser_sect_item_get, + _pg_sp_cam_parse_item, false); +} + +/** + * ice_pg_nm_cam_table_get - create a parse graph no match cam table + * @hw: pointer to the hardware structure + */ +struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw) +{ + return (struct ice_pg_nm_cam_item *) + ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_CAM, + sizeof(struct ice_pg_nm_cam_item), + ICE_PG_NM_CAM_TABLE_SIZE, + ice_parser_sect_item_get, + _pg_nm_cam_parse_item, false); +} + +/** + * ice_pg_nm_sp_cam_table_get - create a parse graph no match 
spill cam table + * @hw: pointer to the hardware structure + */ +struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw) +{ + return (struct ice_pg_nm_cam_item *) + ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_SPILL, + sizeof(struct ice_pg_nm_cam_item), + ICE_PG_NM_SP_CAM_TABLE_SIZE, + ice_parser_sect_item_get, + _pg_nm_sp_cam_parse_item, false); +} + +static bool _pg_cam_match(struct ice_pg_cam_item *item, + struct ice_pg_cam_key *key) +{ + if (!item->key.valid || + item->key.node_id != key->node_id || + item->key.flag0 != key->flag0 || + item->key.flag1 != key->flag1 || + item->key.flag2 != key->flag2 || + item->key.flag3 != key->flag3 || + item->key.boost_idx != key->boost_idx || + item->key.alu_reg != key->alu_reg || + item->key.next_proto != key->next_proto) + return false; + + return true; +} + +static bool _pg_nm_cam_match(struct ice_pg_nm_cam_item *item, + struct ice_pg_cam_key *key) +{ + if (!item->key.valid || + item->key.node_id != key->node_id || + item->key.flag0 != key->flag0 || + item->key.flag1 != key->flag1 || + item->key.flag2 != key->flag2 || + item->key.flag3 != key->flag3 || + item->key.boost_idx != key->boost_idx || + item->key.alu_reg != key->alu_reg) + return false; + + return true; +} + +/** + * ice_pg_cam_match - search parse graph cam table by key + * @table: parse graph cam table to search + * @size: cam table size + * @key: search key + */ +struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table, + int size, struct ice_pg_cam_key *key) +{ + int i; + + for (i = 0; i < size; i++) { + struct ice_pg_cam_item *item = &table[i]; + + if (_pg_cam_match(item, key)) + return item; + } + + return NULL; +} + +/** + * ice_pg_nm_cam_match - search parse graph no match cam table by key + * @table: parse graph no match cam table to search + * @size: cam table size + * @key: search key + */ +struct ice_pg_nm_cam_item * +ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size, + struct ice_pg_cam_key *key) +{ + int i; + + for (i = 0; i < size; i++) { + struct ice_pg_nm_cam_item *item = &table[i]; + + if (_pg_nm_cam_match(item, key)) + return item; + } + + return NULL; +} diff --git a/drivers/thirdparty/ice/ice_pg_cam.h b/drivers/thirdparty/ice/ice_pg_cam.h new file mode 100644 index 000000000000..36fdfc4af9e3 --- /dev/null +++ b/drivers/thirdparty/ice/ice_pg_cam.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _ICE_PG_CAM_H_ +#define _ICE_PG_CAM_H_ + +#define ICE_PG_CAM_TABLE_SIZE 2048 +#define ICE_PG_SP_CAM_TABLE_SIZE 128 +#define ICE_PG_NM_CAM_TABLE_SIZE 1024 +#define ICE_PG_NM_SP_CAM_TABLE_SIZE 64 + +struct ice_pg_cam_key { + bool valid; + u16 node_id; + bool flag0; + bool flag1; + bool flag2; + bool flag3; + u8 boost_idx; + u16 alu_reg; + u32 next_proto; +}; + +struct ice_pg_nm_cam_key { + bool valid; + u16 node_id; + bool flag0; + bool flag1; + bool flag2; + bool flag3; + u8 boost_idx; + u16 alu_reg; +}; + +struct ice_pg_cam_action { + u16 next_node; + u8 next_pc; + bool is_pg; + u8 proto_id; + bool is_mg; + u8 marker_id; + bool is_last_round; + bool ho_polarity; + u16 ho_inc; +}; + +struct ice_pg_cam_item { + u16 idx; + struct ice_pg_cam_key key; + struct ice_pg_cam_action action; +}; + +struct ice_pg_nm_cam_item { + u16 idx; + struct ice_pg_nm_cam_key key; + struct ice_pg_cam_action action; +}; + +void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item); +void ice_pg_nm_cam_dump(struct ice_hw *hw, struct ice_pg_nm_cam_item *item); + +struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw); +struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw); + +struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw); +struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw); + +struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table, + int size, struct ice_pg_cam_key *key); +struct ice_pg_nm_cam_item * +ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size, + struct ice_pg_cam_key *key); +#endif /* _ICE_PG_CAM_H_ */ diff --git a/drivers/thirdparty/ice/ice_proto_grp.c b/drivers/thirdparty/ice/ice_proto_grp.c new file mode 100644 index 000000000000..3579e698516c --- /dev/null +++ b/drivers/thirdparty/ice/ice_proto_grp.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#include "ice_common.h" +#include "ice_parser_util.h" + + +static void _proto_off_dump(struct ice_hw *hw, struct ice_proto_off *po, + int idx) +{ + dev_info(ice_hw_to_dev(hw), "proto %d\n", idx); + dev_info(ice_hw_to_dev(hw), "\tpolarity = %d\n", po->polarity); + dev_info(ice_hw_to_dev(hw), "\tproto_id = %d\n", po->proto_id); + dev_info(ice_hw_to_dev(hw), "\toffset = %d\n", po->offset); +} + +/** + * ice_proto_grp_dump - dump a proto group item info + * @hw: pointer to the hardware structure + * @item: proto group item to dump + */ +void ice_proto_grp_dump(struct ice_hw *hw, struct ice_proto_grp_item *item) +{ + int i; + + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); + + for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) + _proto_off_dump(hw, &item->po[i], i); +} + +/** The function parses a 22 bits Protocol entry with below format: + * BIT 0: Polarity of Protocol Offset (po->polarity) + * BIT 1-8: Protocol ID (po->proto_id) + * BIT 9-11: reserved + * BIT 12-21: Protocol Offset (po->offset) + */ +static void _proto_off_parse(struct ice_proto_off *po, u32 data) +{ + po->polarity = (data & 0x1) != 0; + po->proto_id = (u8)((data >> 1) & 0xff); + po->offset = (u16)((data >> 12) & 0x3ff); +} + +/** The function parses a 192 bits Protocol Group Table entry with below + * format: + * BIT 0-21: Protocol 0 (grp->po[0]) + * BIT 22-43: Protocol 1 (grp->po[1]) + * BIT 44-65: Protocol 2 (grp->po[2]) + * BIT 66-87: Protocol 3 (grp->po[3]) + * BIT 88-109: Protocol 4 (grp->po[4]) + * BIT 110-131:Protocol 5 (grp->po[5]) + * BIT 132-153:Protocol 6 (grp->po[6]) + * BIT 154-175:Protocol 7 (grp->po[7]) + * BIT 176-191:reserved + */ +static void _proto_grp_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + struct ice_proto_grp_item *grp = (struct ice_proto_grp_item *)item; + u8 *buf = (u8 *)data; + u32 d32; + + grp->idx = idx; + + d32 = *(u32 *)buf; + _proto_off_parse(&grp->po[0], d32); + + d32 = (*(u32 *)&buf[2] >> 6); + _proto_off_parse(&grp->po[1], d32); + + d32 = (*(u32 *)&buf[5] >> 4); + _proto_off_parse(&grp->po[2], d32); + + d32 = (*(u32 *)&buf[8] >> 2); + _proto_off_parse(&grp->po[3], d32); + + d32 = *(u32 *)&buf[11]; + _proto_off_parse(&grp->po[4], d32); + + d32 = (*(u32 *)&buf[13] >> 6); + _proto_off_parse(&grp->po[5], d32); + + d32 = (*(u32 *)&buf[16] >> 4); + _proto_off_parse(&grp->po[6], d32); + + d32 = (*(u32 *)&buf[19] >> 2); + _proto_off_parse(&grp->po[7], d32); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_proto_grp_dump(hw, grp); +} + +/** + * ice_proto_grp_table_get - create a proto group table + * @hw: pointer to the hardware structure + */ +struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw) +{ + return (struct ice_proto_grp_item *) + ice_parser_create_table(hw, ICE_SID_RXPARSER_PROTO_GRP, + sizeof(struct ice_proto_grp_item), + ICE_PROTO_GRP_TABLE_SIZE, + ice_parser_sect_item_get, + _proto_grp_parse_item, false); +} diff --git a/drivers/thirdparty/ice/ice_proto_grp.h b/drivers/thirdparty/ice/ice_proto_grp.h new file mode 100644 index 000000000000..d816145af888 --- /dev/null +++ b/drivers/thirdparty/ice/ice_proto_grp.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _ICE_PROTO_GRP_H_ +#define _ICE_PROTO_GRP_H_ + +#define ICE_PROTO_COUNT_PER_GRP 8 +#define ICE_PROTO_GRP_TABLE_SIZE 192 + +struct ice_proto_off { + bool polarity; /* true: positive, false: nagtive */ + u8 proto_id; + u16 offset; +}; + +struct ice_proto_grp_item { + u16 idx; + struct ice_proto_off po[ICE_PROTO_COUNT_PER_GRP]; +}; + +void ice_proto_grp_dump(struct ice_hw *hw, struct ice_proto_grp_item *item); +struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw); +#endif /* _ICE_PROTO_GRP_H_ */ diff --git a/drivers/thirdparty/ice/ice_protocol_type.h b/drivers/thirdparty/ice/ice_protocol_type.h index 8b5861138096..26d7c7a65e92 100644 --- a/drivers/thirdparty/ice/ice_protocol_type.h +++ b/drivers/thirdparty/ice/ice_protocol_type.h @@ -30,6 +30,7 @@ enum ice_protocol_type { ICE_MAC_OFOS = 0, ICE_MAC_IL, ICE_ETYPE_OL, + ICE_ETYPE_IL, ICE_VLAN_OFOS, ICE_IPV4_OFOS, ICE_IPV4_IL, @@ -44,15 +45,16 @@ enum ice_protocol_type { ICE_VXLAN_GPE, ICE_NVGRE, ICE_GTP, + ICE_GTP_NO_PAY, ICE_PPPOE, ICE_PFCP, ICE_L2TPV3, ICE_ESP, ICE_AH, ICE_NAT_T, - ICE_GTP_NO_PAY, ICE_VLAN_EX, ICE_VLAN_IN, + ICE_FLG_DIR, ICE_PROTOCOL_LAST }; @@ -68,6 +70,10 @@ enum ice_sw_tunnel_type { ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN * and GENEVE */ + ICE_SW_IPV4_TCP, + ICE_SW_IPV4_UDP, + ICE_SW_IPV6_TCP, + ICE_SW_IPV6_UDP, ICE_SW_TUN_IPV4_GTP_IPV4_TCP, ICE_SW_TUN_IPV4_GTP_IPV4_UDP, ICE_SW_TUN_IPV4_GTP_IPV6_TCP, @@ -84,6 +90,8 @@ enum ice_sw_tunnel_type { ICE_SW_TUN_GTP_IPV4_UDP, ICE_SW_TUN_GTP_IPV6_TCP, ICE_SW_TUN_GTP_IPV6_UDP, + ICE_SW_TUN_GTPU, + ICE_SW_TUN_GTPC, ICE_SW_TUN_IPV4_GTPU_IPV4, ICE_SW_TUN_IPV4_GTPU_IPV6, ICE_SW_TUN_IPV6_GTPU_IPV4, @@ -117,8 +125,11 @@ enum ice_prot_id { ICE_PROT_MPLS_IL = 29, ICE_PROT_IPV4_OF_OR_S = 32, ICE_PROT_IPV4_IL = 33, + ICE_PROT_IPV4_IL_IL = 34, ICE_PROT_IPV6_OF_OR_S = 40, ICE_PROT_IPV6_IL = 41, + ICE_PROT_IPV6_IL_IL = 42, + ICE_PROT_IPV6_NEXT_PROTO = 43, ICE_PROT_IPV6_FRAG = 47, ICE_PROT_TCP_IL = 49, ICE_PROT_UDP_OF = 52, @@ -147,10 +158,11 @@ enum ice_prot_id { #define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */ - +#define ICE_NAN_OFFSET 511 #define ICE_MAC_OFOS_HW 1 #define ICE_MAC_IL_HW 4 #define ICE_ETYPE_OL_HW 9 +#define ICE_ETYPE_IL_HW 10 #define ICE_VLAN_OF_HW 16 #define ICE_VLAN_OL_HW 17 #define ICE_IPV4_OFOS_HW 32 @@ -171,12 +183,15 @@ enum ice_prot_id { */ #define ICE_UDP_OF_HW 52 /* UDP Tunnels */ #define ICE_GRE_OF_HW 64 /* NVGRE */ +#define ICE_PPPOE_HW 103 #define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */ #define ICE_MDID_SIZE 2 -#define ICE_TUN_FLAG_MDID 21 -#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID) +#define ICE_TUN_FLAG_MDID 20 +#define ICE_TUN_FLAG_MDID_OFF(word) \ + (ICE_MDID_SIZE * (ICE_TUN_FLAG_MDID + (word))) #define ICE_TUN_FLAG_MASK 0xFF +#define ICE_DIR_FLAG_MASK 0x10 #define ICE_TUN_FLAG_VLAN_MASK 0x01 #define ICE_TUN_FLAG_FV_IND 2 @@ -188,7 +203,6 @@ struct ice_protocol_entry { u8 protocol_id; }; - struct ice_ether_hdr { u8 dst_addr[ETH_ALEN]; u8 src_addr[ETH_ALEN]; @@ -205,8 +219,8 @@ struct ice_ether_vlan_hdr { }; struct ice_vlan_hdr { - __be16 vlan; __be16 type; + __be16 vlan; }; struct ice_ipv4_hdr { @@ -275,7 +289,6 @@ struct ice_udp_gtp_hdr { u8 qfi; u8 rsvrd; }; - struct ice_pppoe_hdr { u8 rsrvd_ver_type; u8 rsrvd_code; @@ -315,7 +328,6 @@ struct ice_nat_t_hdr { struct ice_esp_hdr esp; }; - struct ice_nvgre { __be16 flags; __be16 protocol; diff --git a/drivers/thirdparty/ice/ice_ptp.c b/drivers/thirdparty/ice/ice_ptp.c index 
7198f55cdb0d..d0ecede167ea 100644 --- a/drivers/thirdparty/ice/ice_ptp.c +++ b/drivers/thirdparty/ice/ice_ptp.c @@ -6,10 +6,11 @@ #define E810_OUT_PROP_DELAY_NS 1 +#define INITIAL_PHC_RECALC_ID 0 #define LOCKED_INCVAL_E822 0x100000000ULL -static const struct ptp_pin_desc ice_e810t_pin_desc[] = { +static const struct ptp_pin_desc ice_pin_desc_e810t[] = { /* name idx func chan */ { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } }, { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } }, @@ -18,57 +19,929 @@ static const struct ptp_pin_desc ice_e810t_pin_desc[] = { { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } }, }; +#define MAX_DPLL_NAME_LEN 4 +struct ice_dpll_desc { + char name[MAX_DPLL_NAME_LEN]; + u8 index; +}; + +static const struct ice_dpll_desc ice_e810t_dplls[] = { + /* name idx */ + { "EEC", ICE_CGU_DPLL_SYNCE }, + { "PPS", ICE_CGU_DPLL_PTP }, +}; + +struct dpll_attribute { + struct device_attribute attr; + u8 dpll_num; +}; + +static ssize_t synce_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count); + +static ssize_t pin_cfg_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len); + +static ssize_t pin_cfg_show(struct device *dev, + struct device_attribute *attr, + char *buf); + +static ssize_t dpll_1_offset_show(struct device *dev, + struct device_attribute *attr, + char *buf); + +static ssize_t dpll_name_show(struct device *dev, + struct device_attribute *attr, + char *buf); + +static ssize_t dpll_state_show(struct device *dev, + struct device_attribute *attr, + char *buf); + +static ssize_t dpll_ref_pin_show(struct device *dev, + struct device_attribute *attr, + char *buf); + +static struct kobj_attribute synce_attribute = __ATTR_WO(synce); +static DEVICE_ATTR_RW(pin_cfg); +static DEVICE_ATTR_RO(dpll_1_offset); +static struct dpll_attribute *dpll_name_attrs; +static struct dpll_attribute *dpll_state_attrs; +static struct dpll_attribute *dpll_ref_pin_attrs; + +#define DPLL_MAX_INPUT_PIN_PRIO 14 /** - * ice_enable_e810t_sma_ctrl - * @hw: pointer to the hw struct - * @ena: set true to enable and false to disable + * ice_ptp_parse_and_apply_pin_prio - parse and apply pin prio from the buffer + * @pf: pointer to a pf structure + * @argc: number of arguments to parse + * @argv: list of human readable configuration parameters * - * Enables or disable the SMA control logic + * Parse pin prio config from the split user buffer and apply it on given pin. 
+ * Return 0 on success, negative value otherwise */ -static int ice_enable_e810t_sma_ctrl(struct ice_hw *hw, bool ena) +static int +ice_ptp_parse_and_apply_pin_prio(struct ice_pf *pf, int argc, char **argv) { - int err; - u8 data; + u8 dpll = 0, pin = 0, prio = 0; + int i, ret; - /* Set expander bits as outputs */ - err = ice_read_e810t_pca9575_reg(hw, ICE_PCA9575_P1_CFG, &data); - if (err) - return err; + for (i = 0; i < argc; i++) { + if (!strncmp(argv[i], "prio", sizeof("prio"))) + ret = kstrtou8(argv[++i], 0, &prio); + else if (!strncmp(argv[i], "dpll", sizeof("dpll"))) + ret = kstrtou8(argv[++i], 0, &dpll); + else if (!strncmp(argv[i], "pin", sizeof("pin"))) + ret = kstrtou8(argv[++i], 0, &pin); + else + ret = -EINVAL; - if (ena) - data &= (~ICE_E810T_SMA_CTRL_MASK); - else - data |= ICE_E810T_SMA_CTRL_MASK; + if (ret) + return ret; + } - return ice_write_e810t_pca9575_reg(hw, ICE_PCA9575_P1_CFG, data); + /* priority needs to be in range 0-14 */ + if (prio > DPLL_MAX_INPUT_PIN_PRIO) + return -EINVAL; + + dev_info(ice_pf_to_dev(pf), "%s: dpll: %u, pin:%u, prio:%u\n", + __func__, dpll, pin, prio); + return ice_aq_set_cgu_ref_prio(&pf->hw, dpll, pin, prio); } /** - * ice_get_e810t_sma_config + * ice_ptp_parse_and_apply_output_pin_cfg - parse and apply output pin config + * @pf: pointer to a pf structure + * @argc: number of arguments to parse + * @argv: list of human readable configuration parameters + * + * Parse and apply given configuration items in a split user buffer for the + * output pin. + * Return 0 on success, negative value otherwise + */ +static int +ice_ptp_parse_and_apply_output_pin_cfg(struct ice_pf *pf, int argc, char **argv) +{ + u8 output_idx, flags = 0, old_flags, old_src_sel; + u32 freq = 0, old_freq, old_src_freq; + struct ice_hw *hw = &pf->hw; + bool esync_en_valid = false; + bool pin_en_valid = false; + bool esync_en = false; + bool pin_en = false; + s32 phase_delay = 0; + int i, ret; + + output_idx = ICE_PTP_PIN_INVALID; + for (i = 0; i < argc; i++) { + if (!strncmp(argv[i], "pin", sizeof("pin"))) { + ret = kstrtou8(argv[++i], 0, &output_idx); + } else if (!strncmp(argv[i], "freq", sizeof("freq"))) { + ret = kstrtou32(argv[++i], 0, &freq); + flags |= ICE_AQC_SET_CGU_OUT_CFG_UPDATE_FREQ; + } else if (!strncmp(argv[i], "phase_delay", + sizeof("phase_delay"))) { + ret = kstrtos32(argv[++i], 0, &phase_delay); + flags |= ICE_AQC_SET_CGU_OUT_CFG_UPDATE_PHASE; + } else if (!strncmp(argv[i], "esync", sizeof("esync"))) { + ret = kstrtobool(argv[++i], &esync_en); + esync_en_valid = true; + } else if (!strncmp(argv[i], "enable", sizeof("enable"))) { + ret = kstrtobool(argv[++i], &pin_en); + pin_en_valid = true; + } else { + ret = -EINVAL; + } + + if (ret) + return ret; + } + + if (!esync_en_valid || !pin_en_valid) { + ret = ice_aq_get_output_pin_cfg(hw, output_idx, + &old_flags, + &old_src_sel, + &old_freq, + &old_src_freq); + if (ret) { + dev_err(ice_pf_to_dev(pf), + "Failed to read prev output pin cfg (%u:%s)", + ret, ice_aq_str(hw->adminq.sq_last_status)); + return ret; + } + } + + if (!esync_en_valid) + if (old_flags & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) + flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + else + flags &= ~ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + else + if (esync_en) + flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + else + flags &= ~ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + + if (!pin_en_valid) + if (old_flags & ICE_AQC_SET_CGU_OUT_CFG_OUT_EN) + flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; + else + flags &= ~ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; + else + if (pin_en) + flags |= 
ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; + else + flags &= ~ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; + + dev_info(ice_pf_to_dev(pf), + "output pin:%u, enable: %u, freq:%u, phase_delay:%u, esync:%u, flags:%u\n", + output_idx, pin_en, freq, phase_delay, esync_en, + flags); + return ice_aq_set_output_pin_cfg(hw, output_idx, flags, + 0, freq, phase_delay); +} + +/** + * ice_ptp_parse_and_apply_input_pin_cfg - parse and apply input pin config + * @pf: pointer to a pf structure + * @argc: number of arguments to parse + * @argv: list of human readable configuration parameters + * + * Parse and apply given list of configuration items for the input pin. + * Return 0 on success, negative value otherwise + */ +static int +ice_ptp_parse_and_apply_input_pin_cfg(struct ice_pf *pf, int argc, char **argv) +{ + struct ice_aqc_get_cgu_input_config old_cfg = {0}; + u8 flags1 = 0, flags2 = 0, input_idx; + struct ice_hw *hw = &pf->hw; + bool esync_en_valid = false; + bool pin_en_valid = false; + bool esync_en = false; + bool pin_en = false; + s32 phase_delay = 0; + u32 freq = 0; + int i, ret; + + input_idx = ICE_PTP_PIN_INVALID; + for (i = 0; i < argc; i++) { + if (!strncmp(argv[i], "pin", sizeof("pin"))) { + ret = kstrtou8(argv[++i], 0, &input_idx); + } else if (!strncmp(argv[i], "freq", sizeof("freq"))) { + ret = kstrtou32(argv[++i], 0, &freq); + flags1 |= ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ; + } else if (!strncmp(argv[i], "phase_delay", + sizeof("phase_delay"))) { + ret = kstrtos32(argv[++i], 0, &phase_delay); + flags1 |= ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY; + } else if (!strncmp(argv[i], "esync", sizeof("esync"))) { + ret = kstrtobool(argv[++i], &esync_en); + esync_en_valid = true; + } else if (!strncmp(argv[i], "enable", sizeof("enable"))) { + ret = kstrtobool(argv[++i], &pin_en); + pin_en_valid = true; + } else { + ret = -EINVAL; + } + + if (ret) + return ret; + } + + if (!esync_en_valid || !pin_en_valid) { + ret = ice_aq_get_input_pin_cfg(hw, &old_cfg, input_idx); + if (ret) { + dev_err(ice_pf_to_dev(pf), + "Failed to read prev intput pin cfg (%u:%s)", + ret, ice_aq_str(hw->adminq.sq_last_status)); + return ret; + } + } + + if (flags1 == ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ && + !(old_cfg.flags1 & ICE_AQC_GET_CGU_IN_CFG_FLG1_ANYFREQ)) { + if (freq != ICE_PTP_PIN_FREQ_1HZ && + freq != ICE_PTP_PIN_FREQ_10MHZ) { + dev_err(ice_pf_to_dev(pf), + "Only %i or %i freq supported\n", + ICE_PTP_PIN_FREQ_1HZ, + ICE_PTP_PIN_FREQ_10MHZ); + return -EINVAL; + } + } + + if (!esync_en_valid) + if (old_cfg.flags2 & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) + flags2 |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + else + flags2 &= ~ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + else + if (esync_en) + flags2 |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + else + flags2 &= ~ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + + if (!pin_en_valid) + if (old_cfg.flags2 & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN) + flags2 |= ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN; + else + flags2 &= ~ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN; + else + if (pin_en) + flags2 |= ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN; + else + flags2 &= ~ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN; + + dev_info(ice_pf_to_dev(pf), + "input pin:%u, enable: %u, freq:%u, phase_delay:%u, esync:%u, flags1:%u, flags2:%u\n", + input_idx, pin_en, freq, phase_delay, esync_en, + flags1, flags2); + return ice_aq_set_input_pin_cfg(&pf->hw, input_idx, flags1, flags2, + freq, phase_delay); +} + +/** + * synce_store - sysfs interface for setting PHY recovered clock pins + * @kobj: sysfs node + * @attr: sysfs node attributes + * @buf: string 
representing enable and pin number + * @count: length of the 'buf' string + * + * Return number of bytes written on success or negative value on failure. + */ +static ssize_t +synce_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned int ena, phy_pin; + int status; + const char *pin_name; + struct ice_pf *pf; + u32 freq = 0; + u8 pin, phy; + int cnt; + + pf = ice_kobj_to_pf(kobj); + if (!pf) + return -EPERM; + + cnt = sscanf(buf, "%u %u", &ena, &phy_pin); + if (cnt != 2 || phy_pin >= ICE_C827_RCLK_PINS_NUM) + return -EINVAL; + + status = ice_aq_set_phy_rec_clk_out(&pf->hw, phy_pin, !!ena, &freq); + if (status) + return -EIO; + + if (ice_is_e810(&pf->hw)) { + status = ice_get_pf_c827_idx(&pf->hw, &phy); + if (status) + return -EIO; + + pin = E810T_CGU_INPUT_C827(phy, phy_pin); + pin_name = ice_zl_pin_idx_to_name_e810t(pin); + } else { + /* e822-based devices for now have only one phy available + * (from Rimmon) and only one DPLL RCLK input pin + */ + pin_name = E822_CGU_RCLK_PIN_NAME; + } + + dev_info(ice_hw_to_dev(&pf->hw), "%s recovered clock: pin %s\n", + !!ena ? "Enabled" : "Disabled", pin_name); + + return count; +} + +/** + * pin_cfg_store - sysfs interface callback for configuration of pins + * @dev: device that owns the attribute + * @attr: sysfs device attribute + * @buf: string representing configuration + * @len: length of the 'buf' string + * + * Allows set new configuration of a pin, given in a user buffer. + * Return number of bytes written on success or negative value on failure. + */ +static ssize_t pin_cfg_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ice_pf *pf; + int argc, ret; + char **argv; + + pf = pci_get_drvdata(pdev); + if (ice_is_reset_in_progress(pf->state)) + return -EAGAIN; + + argv = argv_split(GFP_KERNEL, buf, &argc); + if (!argv) + return -ENOMEM; + + if (argc == ICE_PTP_PIN_PRIO_ARG_CNT) { + ret = ice_ptp_parse_and_apply_pin_prio(pf, argc, argv); + } else if (argc == ICE_PTP_PIN_CFG_1_ARG_CNT || + argc == ICE_PTP_PIN_CFG_2_ARG_CNT || + argc == ICE_PTP_PIN_CFG_3_ARG_CNT || + argc == ICE_PTP_PIN_CFG_4_ARG_CNT) { + if (!strncmp(argv[0], "in", sizeof("in"))) { + ret = ice_ptp_parse_and_apply_input_pin_cfg(pf, + argc - 1, + argv + 1); + } else if (!strncmp(argv[0], "out", sizeof("out"))) { + ret = ice_ptp_parse_and_apply_output_pin_cfg(pf, + argc - 1, + argv + 1); + } else { + ret = -EINVAL; + dev_dbg(ice_pf_to_dev(pf), + "%s: wrong pin direction argument:%s\n", + __func__, argv[0]); + } + } else { + ret = -EINVAL; + dev_dbg(ice_pf_to_dev(pf), + "%s: wrong number of arguments:%d\n", + __func__, argc); + } + + if (!ret) + ret = len; + argv_free(argv); + + return ret; +} + +/** + * ice_ptp_load_output_pin_cfg - load formated output pin config into buffer + * @pf: pointer to pf structure + * @buf: user buffer to fill with returned data + * @offset: added to buf pointer before first time writing to it + * @pin_num: number of output pins to be printed + * + * Acquires configuration of output pins from FW and load it into + * provided user buffer. + * Returns total number of bytes written to the buffer. + * Negative on failure. 
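[Reviewer note, not part of the patch] The two write paths above take short, space-separated command strings: synce_store() parses "<enable> <pin>" with sscanf(), and pin_cfg_store() splits the buffer and dispatches either a priority command or an "in"/"out" command to the parse helpers earlier in this file. A minimal userspace sketch follows; the PCI address 0000:18:00.0 is a placeholder, and the assumption that the priority form is exactly the three key/value pairs shown (ICE_PTP_PIN_PRIO_ARG_CNT is not visible in this diff) is mine.

/* Illustrative only: exercise the new "synce" and "pin_cfg" attributes
 * from userspace.  The PCI address is a placeholder; the command grammar
 * follows synce_store() and pin_cfg_store() above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *cmd)
{
    int fd = open(path, O_WRONLY);
    ssize_t n;

    if (fd < 0) {
        perror(path);
        return -1;
    }
    n = write(fd, cmd, strlen(cmd));
    close(fd);
    return n < 0 ? -1 : 0;
}

int main(void)
{
    /* "<enable> <pin>": enable recovered clock output on PHY pin 0 */
    write_attr("/sys/bus/pci/devices/0000:18:00.0/phy/synce", "1 0");

    /* "out" + key/value pairs: enable CGU output pin 1 at 10 MHz */
    write_attr("/sys/bus/pci/devices/0000:18:00.0/pin_cfg",
               "out pin 1 enable 1 freq 10000000");

    /* assumed priority form: input pin 2 gets priority 3 on DPLL 0 */
    write_attr("/sys/bus/pci/devices/0000:18:00.0/pin_cfg",
               "pin 2 prio 3 dpll 0");
    return 0;
}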
+ */ +static int +ice_ptp_load_output_pin_cfg(struct ice_pf *pf, char *buf, ssize_t offset, + const u8 pin_num) +{ + u8 pin, pin_en, esync_en, dpll, flags; + struct ice_hw *hw = &pf->hw; + int count = offset; + u32 freq, src_freq; + + count += scnprintf(buf + count, PAGE_SIZE, "%s\n", "out"); + count += scnprintf(buf + count, PAGE_SIZE, + "|%4s|%8s|%5s|%11s|%6s|\n", + "pin", "enabled", "dpll", "freq", "esync"); + for (pin = 0; pin < pin_num; ++pin) { + int ret = ice_aq_get_output_pin_cfg(hw, pin, &flags, + &dpll, &freq, &src_freq); + + if (ret) { + dev_err(ice_pf_to_dev(pf), + "err:%d %s failed to read output pin cfg on pin:%u\n", + ret, ice_aq_str(hw->adminq.sq_last_status), + pin); + return ret; + } + esync_en = !!(flags & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN); + pin_en = !!(flags & ICE_AQC_GET_CGU_OUT_CFG_OUT_EN); + dpll &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL; + count += scnprintf(buf + count, PAGE_SIZE, + "|%4u|%8u|%5u|%11u|%6u|\n", + pin, pin_en, dpll, freq, esync_en); + } + + return count; +} + +/** + * ice_ptp_load_input_pin_cfg - load formated input pin config into buffer + * @pf: pointer to pf structure + * @buf: user buffer to fill with returned data + * @offset: added to buf pointer before first time writing to it + * @pin_num: number of input pins to be printed + * + * Acquires configuration of input pins from FW and load it into + * provided user buffer. + * Returns total number of bytes written to the buffer. + * Negative on failure. + */ +static int +ice_ptp_load_input_pin_cfg(struct ice_pf *pf, char *buf, + ssize_t offset, const u8 pin_num) +{ + u8 pin, pin_en, esync_en, esync_fail, dpll0_prio, dpll1_prio; + struct ice_aqc_get_cgu_input_config in_cfg; + struct ice_hw *hw = &pf->hw; + const char *pin_state; + int count = offset; + s32 phase_delay; + u32 freq; + + count += scnprintf(buf + count, PAGE_SIZE, "%s\n", "in"); + count += scnprintf(buf + count, PAGE_SIZE, + "|%4s|%8s|%8s|%11s|%12s|%6s|%11s|%11s|\n", + "pin", "enabled", "state", "freq", "phase_delay", + "esync", "DPLL0 prio", "DPLL1 prio"); + for (pin = 0; pin < pin_num; ++pin) { + int ret; + + memset(&in_cfg, 0, sizeof(in_cfg)); + ret = ice_aq_get_input_pin_cfg(hw, &in_cfg, pin); + if (ret) { + dev_err(ice_pf_to_dev(pf), + "err:%d %s failed to read input pin cfg on pin:%u\n", + ret, ice_aq_str(hw->adminq.sq_last_status), + pin); + return ret; + } + + ret = ice_aq_get_cgu_ref_prio(hw, ICE_CGU_DPLL_SYNCE, + pin, &dpll0_prio); + if (ret) { + dev_err(ice_pf_to_dev(pf), + "err:%d %s failed to read DPLL0 pin prio on pin:%u\n", + ret, ice_aq_str(hw->adminq.sq_last_status), + pin); + return ret; + } + + ret = ice_aq_get_cgu_ref_prio(hw, ICE_CGU_DPLL_PTP, + pin, &dpll1_prio); + if (ret) { + dev_err(ice_pf_to_dev(pf), + "err:%d %s failed to read DPLL1 pin prio on pin:%u\n", + ret, ice_aq_str(hw->adminq.sq_last_status), + pin); + return ret; + } + + esync_en = !!(in_cfg.flags2 & + ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN); + esync_fail = !!(in_cfg.status & + ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL); + pin_en = !!(in_cfg.flags2 & + ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN); + phase_delay = le32_to_cpu(in_cfg.phase_delay); + freq = le32_to_cpu(in_cfg.freq); + + if (in_cfg.status & ICE_CGU_IN_PIN_FAIL_FLAGS) + pin_state = ICE_DPLL_PIN_STATE_INVALID; + else if (esync_en && esync_fail) + pin_state = ICE_DPLL_PIN_STATE_INVALID; + else + pin_state = ICE_DPLL_PIN_STATE_VALID; + + count += scnprintf(buf + count, PAGE_SIZE, + "|%4u|%8u|%8s|%11u|%12d|%6u|%11u|%11u|\n", + in_cfg.input_idx, pin_en, pin_state, freq, + phase_delay, esync_en, 
dpll0_prio, + dpll1_prio); + } + + return count; +} + +/** + * ice_ptp_load_pin_cfg - load formated pin config into user buffer + * @pf: pointer to pf structure + * @buf: user buffer to fill with returned data + * @offset: added to buf pointer before first time writing to it + * + * Acquires configuration from FW and load it into provided buffer. + * Returns total number of bytes written to the buffer + */ +static ssize_t +ice_ptp_load_pin_cfg(struct ice_pf *pf, char *buf, ssize_t offset) +{ + struct ice_aqc_get_cgu_abilities abilities; + struct ice_hw *hw = &pf->hw; + int ret; + + ret = ice_aq_get_cgu_abilities(hw, &abilities); + if (ret) { + dev_err(ice_pf_to_dev(pf), + "err:%d %s failed to read cgu abilities\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); + return ret; + } + + ret = ice_ptp_load_input_pin_cfg(pf, buf, offset, + abilities.num_inputs); + if (ret < 0) + return ret; + offset += ret; + ret = ice_ptp_load_output_pin_cfg(pf, buf, offset, + abilities.num_outputs); + if (ret < 0) + return ret; + ret += offset; + + return ret; +} + +/** + * pin_cfg_show - sysfs interface callback for reading pin_cfg file + * @dev: pointer to dev structure + * @attr: device attribute pointing sysfs file + * @buf: user buffer to fill with returned data + * + * Collect data and feed the user buffed. + * Returns total number of bytes written to the buffer + */ +static ssize_t pin_cfg_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ice_pf *pf; + + pf = pci_get_drvdata(pdev); + + return ice_ptp_load_pin_cfg(pf, buf, 0); +} + +/** + * dpll_name_show - sysfs interface callback for reading dpll_name file + * @dev: pointer to dev structure + * @attr: device attribute pointing sysfs file + * @buf: user buffer to fill with returned data + * + * Collect data and feed the user buffed. + * Returns total number of bytes written to the buffer + */ +static ssize_t dpll_name_show(struct device __always_unused *dev, + struct device_attribute *attr, char *buf) +{ + struct dpll_attribute *dpll_attr; + u8 dpll_num; + + dpll_attr = container_of(attr, struct dpll_attribute, attr); + dpll_num = dpll_attr->dpll_num; + + if (dpll_num < ICE_CGU_DPLL_MAX) + return snprintf(buf, PAGE_SIZE, "%s\n", + ice_e810t_dplls[dpll_num].name); + + return -EINVAL; +} + +/** + * dpll_state_show - sysfs interface callback for reading dpll_state file + * @dev: pointer to dev structure + * @attr: device attribute pointing sysfs file + * @buf: user buffer to fill with returned data + * + * Collect data and feed the user buffed. 
+ * Returns number of bytes written to the buffer or negative value on error + */ +static ssize_t dpll_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dpll_attribute *dpll_attr; + enum ice_cgu_state *dpll_state; + struct pci_dev *pdev; + struct ice_pf *pf; + ssize_t cnt; + + pdev = to_pci_dev(dev); + pf = pci_get_drvdata(pdev); + dpll_attr = container_of(attr, struct dpll_attribute, attr); + + switch (dpll_attr->dpll_num) { + case ICE_CGU_DPLL_SYNCE: + dpll_state = &pf->synce_dpll_state; + break; + case ICE_CGU_DPLL_PTP: + dpll_state = &pf->ptp_dpll_state; + break; + default: + return -EINVAL; + } + + cnt = snprintf(buf, PAGE_SIZE, "%d\n", *dpll_state); + + return cnt; +} + +/** + * dpll_ref_pin_show - sysfs callback for reading dpll_ref_pin file + * + * @dev: pointer to dev structure + * @attr: device attribute pointing sysfs file + * @buf: user buffer to fill with returned data + * + * Collect data and feed the user buffed. + * Returns number of bytes written to the buffer or negative value on error + */ +static ssize_t dpll_ref_pin_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dpll_attribute *dpll_attr; + enum ice_cgu_state *dpll_state; + struct pci_dev *pdev; + struct ice_pf *pf; + ssize_t cnt; + u8 pin; + + pdev = to_pci_dev(dev); + pf = pci_get_drvdata(pdev); + dpll_attr = container_of(attr, struct dpll_attribute, attr); + + switch (dpll_attr->dpll_num) { + case ICE_CGU_DPLL_SYNCE: + dpll_state = &pf->synce_dpll_state; + pin = pf->synce_ref_pin; + break; + case ICE_CGU_DPLL_PTP: + dpll_state = &pf->ptp_dpll_state; + pin = pf->ptp_ref_pin; + break; + default: + return -EINVAL; + } + + switch (*dpll_state) { + case ICE_CGU_STATE_LOCKED: + case ICE_CGU_STATE_LOCKED_HO_ACQ: + case ICE_CGU_STATE_HOLDOVER: + cnt = snprintf(buf, PAGE_SIZE, "%d\n", pin); + break; + default: + return -EAGAIN; + } + + return cnt; +} + +/** + * dpll_1_offset_show - sysfs interface callback for reading dpll_1_offset file + * @dev: pointer to dev structure + * @attr: device attribute pointing sysfs file + * @buf: user buffer to fill with returned data + * + * Returns number of bytes written to the buffer or negative value on error + */ +static ssize_t dpll_1_offset_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pdev; + struct ice_pf *pf; + + pdev = to_pci_dev(dev); + pf = pci_get_drvdata(pdev); + + return snprintf(buf, PAGE_SIZE, "%lld\n", pf->ptp_dpll_phase_offset); +} + +/** + * ice_phy_sysfs_init - initialize sysfs for DPLL + * @pf: pointer to pf structure + * + * Initialize sysfs for handling DPLL in HW. + */ +static void ice_phy_sysfs_init(struct ice_pf *pf) +{ + struct kobject *phy_kobj; + + phy_kobj = kobject_create_and_add("phy", &pf->pdev->dev.kobj); + if (!phy_kobj) { + dev_warn(ice_pf_to_dev(pf), "Failed to create PHY kobject\n"); + return; + } + + if (sysfs_create_file(phy_kobj, &synce_attribute.attr)) { + dev_warn(ice_pf_to_dev(pf), "Failed to create synce sysfs file\n"); + kobject_put(phy_kobj); + return; + } + + pf->ptp.phy_kobj = phy_kobj; +} + +/** + * ice_pin_cfg_sysfs_init - initialize sysfs for pin_cfg + * @pf: pointer to pf structure + * + * Initialize sysfs for handling pin configuration in DPLL. 
+ */ +static void ice_pin_cfg_sysfs_init(struct ice_pf *pf) +{ + if (device_create_file(ice_pf_to_dev(pf), &dev_attr_pin_cfg)) + dev_warn(ice_pf_to_dev(pf), "Failed to create pin_cfg sysfs file\n"); +} + +/** + * ice_dpll_1_offset_init - initialize sysfs for dpll_1_offset + * @pf: pointer to pf structure + * + * Initialize sysfs for handling dpll_1_offset in DPLL. + */ +static void ice_dpll_1_offset_init(struct ice_pf *pf) +{ + if (device_create_file(ice_pf_to_dev(pf), &dev_attr_dpll_1_offset)) + dev_warn(ice_pf_to_dev(pf), + "Failed to create dpll_1_offset sysfs file\n"); +} + +/** + * ice_dpll_attrs_init - initialize sysfs for dpll_attribute + * @pf: pointer to pf structure + * @name_suffix: sysfs file name suffix + * @show: pointer to a show operation handler + * + * Helper function to allocate and initialize sysfs for dpll_attribute array + * Returns pointer to dpll_attribute struct on success, ERR_PTR on error + */ +static struct dpll_attribute * +ice_dpll_attrs_init(struct ice_pf *pf, const char *name_suffix, + ssize_t (*show)(struct device *dev, + struct device_attribute *attr, char *buf)) +{ + struct device *dev = ice_pf_to_dev(pf); + struct dpll_attribute *dpll_attr; + int err, i = 0; + char *name; + + dpll_attr = devm_kcalloc(dev, ICE_CGU_DPLL_MAX, sizeof(*dpll_attr), + GFP_KERNEL); + + if (!dpll_attr) { + err = -ENOMEM; + goto err; + } + + for (i = 0; i < ICE_CGU_DPLL_MAX; ++i) { + name = devm_kasprintf(dev, GFP_KERNEL, "dpll_%u_%s", i, + name_suffix); + if (!name) { + err = -ENOMEM; + goto err; + } + + dpll_attr[i].attr.attr.name = name; + dpll_attr[i].attr.attr.mode = 0444; + dpll_attr[i].attr.show = show; + dpll_attr[i].dpll_num = i; + + sysfs_bin_attr_init(&dpll_attr[i].attr); + err = device_create_file(dev, &dpll_attr[i].attr); + if (err) { + devm_kfree(dev, name); + goto err; + } + } + + return dpll_attr; + +err: + while (--i >= 0) { + devm_kfree(dev, (char *)dpll_attr[i].attr.attr.name); + device_remove_file(dev, &dpll_attr[i].attr); + } + + devm_kfree(dev, dpll_attr); + + dev_warn(dev, "Failed to create %s sysfs files\n", name_suffix); + return (struct dpll_attribute *)ERR_PTR(err); +} + +/** + * ice_ptp_sysfs_init - initialize sysfs for ptp and synce features + * @pf: pointer to pf structure + * + * Initialize sysfs for handling configuration of ptp and synce features. + */ +static void ice_ptp_sysfs_init(struct ice_pf *pf) +{ + if (ice_is_feature_supported(pf, ICE_F_PHY_RCLK)) + ice_phy_sysfs_init(pf); + + if (pf->hw.func_caps.ts_func_info.src_tmr_owned && + ice_is_feature_supported(pf, ICE_F_CGU)) { + ice_pin_cfg_sysfs_init(pf); + ice_dpll_1_offset_init(pf); + dpll_name_attrs = ice_dpll_attrs_init(pf, "name", + dpll_name_show); + dpll_state_attrs = ice_dpll_attrs_init(pf, "state", + dpll_state_show); + dpll_ref_pin_attrs = ice_dpll_attrs_init(pf, "ref_pin", + dpll_ref_pin_show); + } +} + +/** + * ice_ptp_sysfs_release - release sysfs resources of ptp and synce features + * @pf: pointer to pf structure + * + * Release sysfs interface resources for handling configuration of + * ptp and synce features. 
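[Reviewer note, not part of the patch] For the read-back side: ice_dpll_attrs_init() above generates attribute names with the "dpll_%u_%s" format and the "name", "state" and "ref_pin" suffixes, alongside the fixed dpll_1_offset file. A small sketch that dumps them, again assuming a placeholder PCI address:

/* Illustrative only: read the DPLL attributes registered by
 * ice_ptp_sysfs_init().  Names follow the "dpll_%u_%s" format used above.
 */
#include <stdio.h>

static void dump_attr(const char *name)
{
    char path[256], buf[128];
    FILE *f;

    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/0000:18:00.0/%s", name);
    f = fopen(path, "r");
    if (!f)
        return;
    if (fgets(buf, sizeof(buf), f))
        printf("%-16s %s", name, buf);
    fclose(f);
}

int main(void)
{
    dump_attr("dpll_0_name");     /* "EEC" */
    dump_attr("dpll_0_state");    /* numeric ice_cgu_state value */
    dump_attr("dpll_1_name");     /* "PPS" */
    dump_attr("dpll_1_ref_pin");  /* readable only when locked/holdover */
    dump_attr("dpll_1_offset");   /* pf->ptp_dpll_phase_offset */
    return 0;
}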
+ */ +static void ice_ptp_sysfs_release(struct ice_pf *pf) +{ + if (pf->ptp.phy_kobj) { + sysfs_remove_file(pf->ptp.phy_kobj, &synce_attribute.attr); + kobject_put(pf->ptp.phy_kobj); + pf->ptp.phy_kobj = NULL; + } + + if (pf->hw.func_caps.ts_func_info.src_tmr_owned && + ice_is_feature_supported(pf, ICE_F_CGU)) { + struct device *dev = ice_pf_to_dev(pf); + int i; + + device_remove_file(dev, &dev_attr_pin_cfg); + device_remove_file(dev, &dev_attr_dpll_1_offset); + + for (i = 0; i < ICE_CGU_DPLL_MAX; ++i) { + if (!IS_ERR(dpll_name_attrs)) + device_remove_file(ice_pf_to_dev(pf), + &dpll_name_attrs[i].attr); + if (!IS_ERR(dpll_state_attrs)) + device_remove_file(ice_pf_to_dev(pf), + &dpll_state_attrs[i].attr); + if (!IS_ERR(dpll_ref_pin_attrs)) + device_remove_file(ice_pf_to_dev(pf), + &dpll_ref_pin_attrs[i].attr); + } + } +} + +/** + * ice_get_sma_config_e810t * @hw: pointer to the hw struct - * @ptp_pins:pointer to the ptp_pin_desc struture + * @ptp_pins: pointer to the ptp_pin_desc struture * * Read the configuration of the SMA control logic and put it into the * ptp_pin_desc structure */ static int -ice_get_e810t_sma_config(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) +ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) { - enum ice_status status; u8 data, i; + int status; /* Read initial pin state */ - status = ice_read_e810t_pca9575_reg(hw, ICE_PCA9575_P1_OUT, &data); + status = ice_read_sma_ctrl_e810t(hw, &data); if (status) - return ice_status_to_errno(status); + return status; /* initialize with defaults */ for (i = 0; i < NUM_E810T_PTP_PINS; i++) { snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name), - "%s", ice_e810t_pin_desc[i].name); - ptp_pins[i].index = ice_e810t_pin_desc[i].index; - ptp_pins[i].func = ice_e810t_pin_desc[i].func; - ptp_pins[i].chan = ice_e810t_pin_desc[i].chan; + "%s", ice_pin_desc_e810t[i].name); + ptp_pins[i].index = ice_pin_desc_e810t[i].index; + ptp_pins[i].func = ice_pin_desc_e810t[i].func; + ptp_pins[i].chan = ice_pin_desc_e810t[i].chan; } /* Parse SMA1/UFL1 */ @@ -121,7 +994,7 @@ ice_get_e810t_sma_config(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) } /** - * ice_ptp_set_e810t_sma_state + * ice_ptp_set_sma_state_e810t * @hw: pointer to the hw struct * @ptp_pins: pointer to the ptp_pin_desc struture * @@ -129,26 +1002,26 @@ ice_get_e810t_sma_config(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) * num_pins parameter */ static int -ice_ptp_set_e810t_sma_state(struct ice_hw *hw, +ice_ptp_set_sma_state_e810t(struct ice_hw *hw, const struct ptp_pin_desc *ptp_pins) { - enum ice_status status; + int status; u8 data; /* SMA1 and UFL1 cannot be set to TX at the same time */ if (ptp_pins[SMA1].func == PTP_PF_PEROUT && ptp_pins[UFL1].func == PTP_PF_PEROUT) - return ICE_ERR_PARAM; + return -EINVAL; /* SMA2 and UFL2 cannot be set to RX at the same time */ if (ptp_pins[SMA2].func == PTP_PF_EXTTS && ptp_pins[UFL2].func == PTP_PF_EXTTS) - return ICE_ERR_PARAM; + return -EINVAL; /* Read initial pin state value */ - status = ice_read_e810t_pca9575_reg(hw, ICE_PCA9575_P1_OUT, &data); + status = ice_read_sma_ctrl_e810t(hw, &data); if (status) - return ice_status_to_errno(status); + return status; /* Set the right sate based on the desired configuration */ data &= ~ICE_E810T_SMA1_CTRL_MASK; @@ -198,15 +1071,11 @@ ice_ptp_set_e810t_sma_state(struct ice_hw *hw, data |= ICE_E810T_P1_SMA2_DIR_EN; } - status = ice_write_e810t_pca9575_reg(hw, ICE_PCA9575_P1_OUT, data); - if (status) - return ice_status_to_errno(status); - - return 0; + return 
ice_write_sma_ctrl_e810t(hw, data); } /** - * ice_ptp_set_e810t_sma + * ice_ptp_set_sma_e810t * @info: the driver's PTP info structure * @pin: pin index in kernel structure * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT) @@ -214,7 +1083,7 @@ ice_ptp_set_e810t_sma_state(struct ice_hw *hw, * Set the configuration of a single SMA pin */ static int -ice_ptp_set_e810t_sma(struct ptp_clock_info *info, unsigned int pin, +ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin, enum ptp_pin_function func) { struct ptp_pin_desc ptp_pins[NUM_E810T_PTP_PINS]; @@ -225,7 +1094,7 @@ ice_ptp_set_e810t_sma(struct ptp_clock_info *info, unsigned int pin, if (pin < SMA1 || func > PTP_PF_PEROUT) return -EOPNOTSUPP; - err = ice_get_e810t_sma_config(hw, ptp_pins); + err = ice_get_sma_config_e810t(hw, ptp_pins); if (err) return err; @@ -243,11 +1112,28 @@ ice_ptp_set_e810t_sma(struct ptp_clock_info *info, unsigned int pin, /* Set up new pin function in the temp table */ ptp_pins[pin].func = func; - return ice_ptp_set_e810t_sma_state(hw, ptp_pins); + return ice_ptp_set_sma_state_e810t(hw, ptp_pins); } /** - * ice_e810t_verify_pin + * ice_ptp_set_gnss_e810t - Set the configuration of a GNSS pin + * @info: The driver's PTP info structure + * @func: Assigned function + */ +static int +ice_ptp_set_gnss_e810t(struct ptp_clock_info *info, enum ptp_pin_function func) +{ + struct ice_pf *pf = ptp_info_to_pf(info); + u8 input_idx, flags2; + + input_idx = ice_pin_desc_e810t[GNSS].index; + flags2 = func == PTP_PF_NONE ? 0 : ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN; + + return ice_aq_set_input_pin_cfg(&pf->hw, input_idx, 0, flags2, 0, 0); +} + +/** + * ice_verify_pin_e810t * @info: the driver's PTP info structure * @pin: Pin index * @func: Assigned function @@ -258,11 +1144,11 @@ ice_ptp_set_e810t_sma(struct ptp_clock_info *info, unsigned int pin, * desired functionality */ static int -ice_e810t_verify_pin(struct ptp_clock_info *info, unsigned int pin, +ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { /* Don't allow channel reassignment */ - if (chan != ice_e810t_pin_desc[pin].chan) + if (chan != ice_pin_desc_e810t[pin].chan) return -EOPNOTSUPP; /* Check if functions are properly assigned */ @@ -281,11 +1167,12 @@ ice_e810t_verify_pin(struct ptp_clock_info *info, unsigned int pin, return -EOPNOTSUPP; } - return ice_ptp_set_e810t_sma(info, pin, func); + if (pin == GNSS) + return ice_ptp_set_gnss_e810t(info, func); + else + return ice_ptp_set_sma_e810t(info, pin, func); } - - /** * mul_u128_u64_fac - Multiplies two 64bit factors to the 128b result * @a: First factor to multiply @@ -311,7 +1198,6 @@ static inline void mul_u128_u64_fac(u64 a, u64 b, u64 *hi, u64 *lo) ((a_lo * b_lo) & mask); } - /** * ice_set_tx_tstamp - Enable or disable Tx timestamping * @pf: The PF pointer to search in @@ -321,14 +1207,20 @@ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on) { struct ice_vsi *vsi; u32 val; + u16 i; vsi = ice_get_main_vsi(pf); if (!vsi) return; - vsi->ptp_tx = on; + /* Set the timestamp enable flag for all the Tx rings */ + ice_for_each_txq(vsi, i) { + if (!vsi->tx_rings[i]) + continue; + vsi->tx_rings[i]->ptp_tx = on; + } - /* Enable/disable the TX timestamp interrupt */ + /* Configure the Tx timestamp interrupt */ val = rd32(&pf->hw, PFINT_OICR_ENA); if (on) val |= PFINT_OICR_TSYN_TX_M; @@ -336,10 +1228,7 @@ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on) val &= ~PFINT_OICR_TSYN_TX_M; wr32(&pf->hw, 
PFINT_OICR_ENA, val); - if (on) - pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; - else - pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; + pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; } /** @@ -356,19 +1245,17 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) if (!vsi) return; + /* Set the timestamp flag for all the Rx rings */ ice_for_each_rxq(vsi, i) { if (!vsi->rx_rings[i]) continue; vsi->rx_rings[i]->ptp_rx = on; } - if (on) - pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; - else - pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; } - /** * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit * @pf: Board private structure @@ -377,11 +1264,10 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) * This function will configure timestamping during PTP initialization * and deinitialization */ -static void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) +void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { ice_set_tx_tstamp(pf, ena); ice_set_rx_tstamp(pf, ena); - } /** @@ -402,9 +1288,9 @@ int ice_get_ptp_clock_index(struct ice_pf *pf) { enum ice_aqc_driver_params param_idx; struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 tmr_idx; u32 value; + int err; /* Use the ptp_clock structure if we're the main PF */ if (pf->ptp.clock) @@ -416,12 +1302,10 @@ int ice_get_ptp_clock_index(struct ice_pf *pf) else param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1; - status = ice_aq_get_driver_param(hw, param_idx, &value, NULL); - if (status) { - dev_err(ice_pf_to_dev(pf), - "Failed to read PTP clock index parameter, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + err = ice_aq_get_driver_param(hw, param_idx, &value, NULL); + if (err) { + dev_err(ice_pf_to_dev(pf), "Failed to read PTP clock index parameter, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); return -1; } @@ -449,9 +1333,9 @@ static void ice_set_ptp_clock_index(struct ice_pf *pf) { enum ice_aqc_driver_params param_idx; struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 tmr_idx; u32 value; + int err; if (!pf->ptp.clock) return; @@ -469,12 +1353,10 @@ static void ice_set_ptp_clock_index(struct ice_pf *pf) } value |= PTP_SHARED_CLK_IDX_VALID; - status = ice_aq_set_driver_param(hw, param_idx, value, NULL); - if (status) { - dev_err(ice_pf_to_dev(pf), - "Failed to set PTP clock index parameter, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + err = ice_aq_set_driver_param(hw, param_idx, value, NULL); + if (err) { + dev_err(ice_pf_to_dev(pf), "Failed to set PTP clock index parameter, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); } } @@ -490,8 +1372,8 @@ static void ice_clear_ptp_clock_index(struct ice_pf *pf) { enum ice_aqc_driver_params param_idx; struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 tmr_idx; + int err; /* Do not clear the index if we don't own the timer */ if (!hw->func_caps.ts_func_info.src_tmr_owned) @@ -503,12 +1385,10 @@ static void ice_clear_ptp_clock_index(struct ice_pf *pf) else param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1; - status = ice_aq_set_driver_param(hw, param_idx, 0, NULL); - if (status) { - dev_dbg(ice_pf_to_dev(pf), - "Failed to clear PTP clock index parameter, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + err = ice_aq_set_driver_param(hw, param_idx, 0, NULL); + if (err) { + 
dev_dbg(ice_pf_to_dev(pf), "Failed to clear PTP clock index parameter, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); } } @@ -527,79 +1407,29 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts) tmr_idx = ice_get_ptp_src_clock_index(hw); /* Read the system timestamp pre PHC read */ - if (sts) - ptp_read_system_prets(sts); + ptp_read_system_prets(sts); lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); /* Read the system timestamp post PHC read */ - if (sts) - ptp_read_system_postts(sts); + ptp_read_system_postts(sts); hi = rd32(hw, GLTSYN_TIME_H(tmr_idx)); lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx)); if (lo2 < lo) { /* if TIME_L rolled over read TIME_L again and update - *system timestamps + * system timestamps */ - if (sts) - ptp_read_system_prets(sts); + ptp_read_system_prets(sts); lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); - if (sts) - ptp_read_system_postts(sts); + ptp_read_system_postts(sts); hi = rd32(hw, GLTSYN_TIME_H(tmr_idx)); } return ((u64)hi << 32) | lo; } - -/** - * ice_ptp_update_cached_systime - Update the cached system time values - * @pf: Board specific private structure - * - * This function updates the system time values which are cached in the PF - * structure and the Rx rings. - * - * This should be called periodically at least once a second, and whenever the - * system time has been adjusted. - */ -static void ice_ptp_update_cached_systime(struct ice_pf *pf) -{ - u64 systime; - int i; - - /* Read the current system time */ - systime = ice_ptp_read_src_clk_reg(pf, NULL); - - /* Update the cached system time stored in the PF structure */ - WRITE_ONCE(pf->ptp.cached_phc_time, systime); - - ice_for_each_vsi(pf, i) { - struct ice_vsi *vsi = pf->vsi[i]; - int j; - - if (!vsi) - continue; - -#ifdef HAVE_NETDEV_SB_DEV - if (vsi->type != ICE_VSI_PF && - vsi->type != ICE_VSI_OFFLOAD_MACVLAN) - continue; -#else - if (vsi->type != ICE_VSI_PF) - continue; -#endif /* HAVE_NETDEV_SB_DEV */ - - ice_for_each_rxq(vsi, j) { - if (!vsi->rx_rings[j]) - continue; - WRITE_ONCE(vsi->rx_rings[j]->cached_systime, systime); - } - } -} - /** * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b * @cached_phc_time: recently cached copy of PHC time @@ -696,784 +1526,483 @@ static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp) static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp) { const u64 mask = GENMASK_ULL(31, 0); + unsigned long discard_time; + + /* Discard the hardware timestamp if the cached PHC time is too old */ + discard_time = pf->ptp.cached_phc_jiffies + 2 * HZ; + if (time_is_before_jiffies(discard_time)) { + pf->ptp.tx_hwtstamp_discarded++; + return 0; + } + return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time, (in_tstamp >> 8) & mask); } /** - * ice_ptp_get_ts_idx - Find the free Tx index based on current logical port - * @vsi: lport corresponding VSI - */ -int ice_ptp_get_ts_idx(struct ice_vsi *vsi) -{ - u8 own_idx_start, own_idx_end, lport, qport; - int i; - - lport = vsi->port_info->lport; - qport = lport % ICE_PORTS_PER_QUAD; - /* Check on own idx window */ - own_idx_start = qport * INDEX_PER_PORT; - own_idx_end = own_idx_start + INDEX_PER_PORT; - - for (i = own_idx_start; i < own_idx_end; i++) { - if (!test_and_set_bit(i, vsi->ptp_tx_idx)) - return i; - } - - return -1; -} - -/** - * ice_ptp_rel_all_skb - Free all pending skb waiting for timestamp - * @pf: The PF private structure - */ -static void ice_ptp_rel_all_skb(struct ice_pf *pf) -{ - struct ice_vsi *vsi; - int idx; - - vsi = ice_get_main_vsi(pf); - if 
(!vsi) - return; - for (idx = 0; idx < INDEX_PER_QUAD; idx++) { - if (vsi->ptp_tx_skb[idx]) { - dev_kfree_skb_any(vsi->ptp_tx_skb[idx]); - vsi->ptp_tx_skb[idx] = NULL; - } - } -} - -static const u64 txrx_lane_par_clk[NUM_ICE_PTP_LNK_SPD] = { - 31250000, /* 1G */ - 257812500, /* 10G */ - 644531250, /* 25G */ - 161132812, /* 25G RS */ - 257812500, /* 40G */ - 644531250, /* 50G */ - 644531250, /* 50G RS */ - 644531250, /* 100G RS */ -}; - -static const u64 txrx_lane_pcs_clk[NUM_ICE_PTP_LNK_SPD] = { - 125000000, /* 1G */ - 156250000, /* 10G */ - 390625000, /* 25G */ - 97656250, /* 25G RS */ - 156250000, /* 40G */ - 390625000, /* 50G */ - 644531250, /* 50G RS */ - 644531250, /* 100G RS */ -}; - -static const u64 txrx_rsgb_par_clk[NUM_ICE_PTP_LNK_SPD] = { - 0, /* 1G */ - 0, /* 10G */ - 0, /* 25G */ - 322265625, /* 25G RS */ - 0, /* 40G */ - 0, /* 50G */ - 644531250, /* 50G RS */ - 1289062500, /* 100G RS */ -}; - -static const u64 txrx_rsgb_pcs_clk[NUM_ICE_PTP_LNK_SPD] = { - 0, 0, 0, 97656250, 0, 0, 195312500, 390625000 -}; - -static const u64 rx_desk_par_pcs_clk[NUM_ICE_PTP_LNK_SPD] = { - 0, /* 1G */ - 0, /* 10G */ - 0, /* 25G */ - 0, /* 25G RS */ - 156250000, /* 40G */ - 19531250, /* 50G */ - 644531250, /* 50G RS */ - 644531250, /* 100G RS */ -}; - -/** - * ice_ptp_port_phy_set_parpcs_incval - Set PAR/PCS PHY cycle count - * @pf: Board private struct - * @port: Port we are configuring PHY for + * ice_ptp_tx_tstamp - Process Tx timestamps for a port + * @tx: the PTP Tx timestamp tracker * - * Note that this function is only expected to be called during port up and - * during a link event. + * Process timestamps captured by the PHY associated with this port. To do + * this, loop over each index with a waiting skb. + * + * If a given index has a valid timestamp, perform the following steps: + * + * 1) check that the timestamp request is not stale + * 2) check that a timestamp is ready and available in the PHY memory bank + * 3) check and clear the unread bit + * 4) read and copy the timestamp out of the PHY register + * 5) unlock the index by clearing the associated in_use bit + * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value + * 7) send this 64 bit timestamp to the stack + * + * After looping, if we still have waiting SKBs, then return true. This may + * cause us effectively poll even when not strictly necessary. We do this + * because it's possible a new timestamp was requested around the same time as + * the interrupt. In some cases hardware might not interrupt us again when the + * timestamp is captured. + * + * Note that we do not hold the tracking lock while reading the Tx timestamp. + * This is because reading the timestamp requires taking a mutex that might + * sleep. Instead, the in_use and unread bitmaps are used to prevent a race + * between this function and a thread calling ice_ptp_flush_tx_tracker. + * + * The only place where we set in_use and unread is when a new timestamp is + * initiated with a slot index. This is only called in the hard xmit routine + * where an SKB has a request flag set. The only places where we clear these + * bits are this work function or when flushing the Tx timestamp tracker. + * A timestamp index will never be re-used until the in_use bit for that index + * is cleared. + * + * The unread bitmap is used to prevent races between this thread work + * function and another thread calling ice_ptp_flush_tx_tracker. 
Each thread + * uses test_and_clear_bit on the unread bitmap to ensure mutual exclusion + * for cleaning up that timestamp index. If the unread bit has already been + * cleared then this thread knows that another thread is already flushing that + * timestamp. This ensures that we do not read the Tx timestamp register + * twice, and that we do not pass an invalid timestamp to the stack. + * + * If a Tx thread starts a new timestamp, we might not begin processing it + * right away but we will notice it at the end when we re-queue the task. + * + * If a Tx thread starts a new timestamp just after this function exits, the + * interrupt for that timestamp should re-trigger this function once + * a timestamp is ready. + * + * The complexity of this locking with multiple bits is necessary as it + * minimizes the time spent blocking the Tx hot path. + * + * If a Tx packet has been waiting for more than 2 seconds, it is not possible + * to correctly extend the timestamp using the cached PHC time. It is + * extremely unlikely that a packet will ever take this long to timestamp. If + * we detect a Tx timestamp request that has waited for this long we assume + * the packet will never be sent by hardware and discard it without reading + * the timestamp register. */ -static void ice_ptp_port_phy_set_parpcs_incval(struct ice_pf *pf, int port) +static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) { - u64 cur_freq, clk_incval, uix, phy_tus; - enum ice_ptp_link_spd link_spd; - enum ice_ptp_fec_mode fec_mode; - struct ice_hw *hw = &pf->hw; - enum ice_status status; - u32 val; - - cur_freq = ice_e822_pll_freq(pf->ptp.time_ref_freq); - clk_incval = ice_ptp_read_src_incval(hw); - - status = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); - if (status) - goto exit; - - /* UIX programming */ - /* We split a 'divide by 1e11' operation into a 'divide by 256' and a - * 'divide by 390625000' operation to be able to do the calculation - * using fixed-point math. 
- */ - if (link_spd == ICE_PTP_LNK_SPD_10G || - link_spd == ICE_PTP_LNK_SPD_40G) { -#define LINE_UI_10G_40G 640 /* 6600 UI at 10Gb line rate */ - uix = (cur_freq * LINE_UI_10G_40G) >> 8; - uix *= clk_incval; - uix /= 390625000; - - val = TS_LOW_M & uix; - status = ice_write_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L, - val); - if (status) - goto exit; - val = (uix >> 32) & TS_LOW_M; - status = ice_write_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_U, - val); - if (status) - goto exit; - } else if (link_spd == ICE_PTP_LNK_SPD_25G || - link_spd == ICE_PTP_LNK_SPD_100G_RS) { -#define LINE_UI_25G_100G 256 /* 6600 UI at 25Gb line rate */ - uix = (cur_freq * LINE_UI_25G_100G) >> 8; - uix *= clk_incval; - uix /= 390625000; - - val = TS_LOW_M & uix; - status = ice_write_phy_reg_e822(hw, port, - P_REG_UIX66_25G_100G_L, val); - if (status) - goto exit; - val = (uix >> 32) & TS_LOW_M; - status = ice_write_phy_reg_e822(hw, port, - P_REG_UIX66_25G_100G_U, val); - if (status) - goto exit; - } - - if (link_spd == ICE_PTP_LNK_SPD_25G_RS) { - phy_tus = (cur_freq * clk_incval * 2) / - txrx_rsgb_par_clk[link_spd]; - val = phy_tus & TS_PHY_LOW_M; - ice_write_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L, val); - ice_write_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L, val); - val = (phy_tus >> 8) & TS_PHY_HIGH_M; - ice_write_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_U, val); - ice_write_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_U, val); - - phy_tus = (cur_freq * clk_incval) / - txrx_rsgb_pcs_clk[link_spd]; - val = phy_tus & TS_PHY_LOW_M; - ice_write_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L, val); - ice_write_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L, val); - val = (phy_tus >> 8) & TS_PHY_HIGH_M; - ice_write_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_U, val); - ice_write_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_U, val); - } else { - phy_tus = (cur_freq * clk_incval) / - txrx_lane_par_clk[link_spd]; - val = phy_tus & TS_PHY_LOW_M; - ice_write_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L, val); - val = (phy_tus >> 8) & TS_PHY_HIGH_M; - ice_write_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_U, val); - - if (link_spd != ICE_PTP_LNK_SPD_50G_RS && - link_spd != ICE_PTP_LNK_SPD_100G_RS) { - val = phy_tus & TS_PHY_LOW_M; - ice_write_phy_reg_e822(hw, port, - P_REG_PAR_TX_TUS_L, val); - val = (phy_tus >> 8) & TS_PHY_HIGH_M; - ice_write_phy_reg_e822(hw, port, - P_REG_PAR_TX_TUS_U, val); - } else { - phy_tus = (cur_freq * clk_incval * 2) / - txrx_rsgb_par_clk[link_spd]; - val = phy_tus & TS_PHY_LOW_M; - ice_write_phy_reg_e822(hw, port, - P_REG_DESK_PAR_RX_TUS_L, val); - ice_write_phy_reg_e822(hw, port, - P_REG_DESK_PAR_TX_TUS_L, val); - val = (phy_tus >> 8) & TS_PHY_HIGH_M; - ice_write_phy_reg_e822(hw, port, - P_REG_DESK_PAR_RX_TUS_U, val); - ice_write_phy_reg_e822(hw, port, - P_REG_DESK_PAR_TX_TUS_U, val); - } - - phy_tus = (cur_freq * clk_incval) / - txrx_lane_pcs_clk[link_spd]; - val = phy_tus & TS_PHY_LOW_M; - ice_write_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L, val); - val = (phy_tus >> 8) & TS_PHY_HIGH_M; - ice_write_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_U, val); - - if (link_spd != ICE_PTP_LNK_SPD_50G_RS && - link_spd != ICE_PTP_LNK_SPD_100G_RS) { - val = phy_tus & TS_PHY_LOW_M; - ice_write_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L, - val); - val = (phy_tus >> 8) & TS_PHY_HIGH_M; - ice_write_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_U, - val); - } else { - phy_tus = (cur_freq * clk_incval) / - txrx_rsgb_pcs_clk[link_spd]; - val = phy_tus & TS_PHY_LOW_M; - ice_write_phy_reg_e822(hw, port, - 
P_REG_DESK_PCS_RX_TUS_L, val); - ice_write_phy_reg_e822(hw, port, - P_REG_DESK_PCS_TX_TUS_L, val); - val = (phy_tus >> 8) & TS_PHY_HIGH_M; - ice_write_phy_reg_e822(hw, port, - P_REG_DESK_PCS_RX_TUS_U, val); - ice_write_phy_reg_e822(hw, port, - P_REG_DESK_PCS_TX_TUS_U, val); - } - - if (link_spd == ICE_PTP_LNK_SPD_40G || - link_spd == ICE_PTP_LNK_SPD_50G) { - phy_tus = (cur_freq * clk_incval) / - rx_desk_par_pcs_clk[link_spd]; - val = phy_tus & TS_PHY_LOW_M; - ice_write_phy_reg_e822(hw, port, - P_REG_DESK_PAR_RX_TUS_L, val); - ice_write_phy_reg_e822(hw, port, - P_REG_DESK_PCS_RX_TUS_L, val); - val = (phy_tus >> 8) & TS_PHY_HIGH_M; - ice_write_phy_reg_e822(hw, port, - P_REG_DESK_PAR_RX_TUS_U, val); - ice_write_phy_reg_e822(hw, port, - P_REG_DESK_PCS_RX_TUS_U, val); - } - } - -exit: - if (status) - dev_err(ice_pf_to_dev(pf), "PTP Vernier configuration failed on port %d, status %s\n", - port, ice_stat_str(status)); -} - -/* Values of tx_offset_delay in units of 1/100th of a nanosecond */ -static const u64 tx_offset_delay[NUM_ICE_PTP_LNK_SPD] = { - 25140, /* 1G */ - 6938, /* 10G */ - 2778, /* 25G */ - 3928, /* 25G RS */ - 5666, /* 40G */ - 2778, /* 50G */ - 2095, /* 50G RS */ - 1620, /* 100G RS */ -}; - -/** - * ice_ptp_port_phy_set_tx_offset - Set PHY clock Tx timestamp offset - * @ptp_port: the PTP port we are configuring the PHY for - */ -static int ice_ptp_port_phy_set_tx_offset(struct ice_ptp_port *ptp_port) -{ - u64 cur_freq, clk_incval, offset; - enum ice_ptp_link_spd link_spd; - enum ice_status status; + struct ice_ptp_port *ptp_port; + bool ts_handled = true; struct ice_pf *pf; struct ice_hw *hw; - int port; - u32 val; + u64 tstamp_ready; + int err; + u8 idx; + if (!tx->init) + return false; + + ptp_port = container_of(tx, struct ice_ptp_port, tx); pf = ptp_port_to_pf(ptp_port); - port = ptp_port->port_num; hw = &pf->hw; - /* Get the PTP HW lock */ - if (!ice_ptp_lock(hw)) - return -EBUSY; + /* Read the Tx ready status first */ + err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); + if (err) + return false; - clk_incval = ice_ptp_read_src_incval(hw); - ice_ptp_unlock(hw); + for_each_set_bit(idx, tx->in_use, tx->len) { + struct skb_shared_hwtstamps shhwtstamps = {}; + u8 phy_idx = idx + tx->offset; + u64 raw_tstamp, tstamp; + bool drop_ts = false; + struct sk_buff *skb; + bool unread; - cur_freq = ice_e822_pll_freq(pf->ptp.time_ref_freq); + /* Drop packets which have waited for more than 2 seconds */ + if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { + drop_ts = true; - status = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL); - if (status) - goto exit; + /* Count the number of Tx timestamps that timed out */ + pf->ptp.tx_hwtstamp_timeouts++; + } - offset = cur_freq * clk_incval; - offset /= 10000; - offset *= tx_offset_delay[link_spd]; - offset /= 10000000; + /* Only read a timestamp from the PHY if its marked as ready + * by the tstamp_ready register. This avoids unnecessary + * reading of timestamps which are not yet valid. This is + * important as we must read all timestamps which are valid + * and only timestamps which are valid during each interrupt. + * If we do not, the hardware logic for generating a new + * interrupt can get stuck on some devices. 
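[Reviewer note, not part of the patch] The long kernel-doc above boils down to one primitive: whichever path wins test_and_clear_bit() on the "unread" bit owns completion of that index. A userspace analogue of just that handshake (not driver code; the driver performs the bit operation under tx->lock):

/* Userspace stand-in for the unread-bit race described above: only the
 * caller that actually clears the bit "owns" the index, so the interrupt
 * work path and the flush path never both complete the same timestamp.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t unread;     /* one bit per timestamp index */

static bool claim_idx(unsigned int idx)
{
    uint64_t bit = 1ULL << idx;

    /* atomically clear the bit; true only for the caller that saw it set */
    return atomic_fetch_and(&unread, ~bit) & bit;
}

int main(void)
{
    atomic_store(&unread, 1ULL << 3);           /* timestamp index 3 pending */

    printf("first claim:  %d\n", claim_idx(3)); /* 1: we own it */
    printf("second claim: %d\n", claim_idx(3)); /* 0: already handled */
    return 0;
}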
+ */ + if (!(tstamp_ready & BIT_ULL(phy_idx))) { + if (drop_ts) + goto skip_ts_read; + else + continue; + } - if (link_spd == ICE_PTP_LNK_SPD_1G || - link_spd == ICE_PTP_LNK_SPD_10G || - link_spd == ICE_PTP_LNK_SPD_25G || - link_spd == ICE_PTP_LNK_SPD_25G_RS || - link_spd == ICE_PTP_LNK_SPD_40G || - link_spd == ICE_PTP_LNK_SPD_50G) { - status = ice_read_phy_reg_e822(hw, port, - P_REG_PAR_PCS_TX_OFFSET_L, - &val); - if (status) - goto exit; - offset += val; - status = ice_read_phy_reg_e822(hw, port, - P_REG_PAR_PCS_TX_OFFSET_U, - &val); - if (status) - goto exit; - offset += (u64)val << 32; + spin_lock(&tx->lock); + unread = test_and_clear_bit(idx, tx->unread); + spin_unlock(&tx->lock); + + /* If another thread already initiated a Tx timestamp read, + * just skip this timestamp. That thread will flush the + * timestamp and clear in_use bit. + */ + if (!unread) + continue; + + ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); + + err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp); + if (err) + continue; + + ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); + + /* For e810 hardware, the tstamp_ready bitmask does not + * indicate whether a timestamp is ready. Instead, we check to + * make sure the timestamp is different from the previous + * cached value. If it is not, we need to re-read the + * timestamp later until we get a valid value. + */ + if (!drop_ts && ice_is_e810(hw) && + raw_tstamp == tx->tstamps[idx].cached_tstamp) { + /* Re-set the unread bit to allow reading again */ + spin_lock(&tx->lock); + set_bit(idx, tx->unread); + spin_unlock(&tx->lock); + continue; + } + + /* Discard any timestamp value without the valid bit set */ + if (!(raw_tstamp & ICE_PTP_TS_VALID)) + drop_ts = true; + +skip_ts_read: + spin_lock(&tx->lock); + tx->tstamps[idx].cached_tstamp = raw_tstamp; + clear_bit(idx, tx->in_use); + skb = tx->tstamps[idx].skb; + tx->tstamps[idx].skb = NULL; + spin_unlock(&tx->lock); + + if (!skb) + continue; + + if (drop_ts) { + dev_kfree_skb_any(skb); + continue; + } + + /* Extend the timestamp using cached PHC time */ + tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp); + if (tstamp) { + shhwtstamps.hwtstamp = ns_to_ktime(tstamp); + ice_trace(tx_tstamp_complete, skb, idx); + } + + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); } - if (link_spd == ICE_PTP_LNK_SPD_50G_RS || - link_spd == ICE_PTP_LNK_SPD_100G_RS) { - status = ice_read_phy_reg_e822(hw, port, P_REG_PAR_TX_TIME_L, - &val); - if (status) - goto exit; - offset += val; - status = ice_read_phy_reg_e822(hw, port, P_REG_PAR_TX_TIME_U, - &val); - if (status) - goto exit; - offset += (u64)val << 32; - } + /* Check if we still have work to do. If so, re-queue this task to + * poll for remaining timestamps. 
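[Reviewer note, not part of the patch] On the extension step used just above: the PHY returns a 40-bit value whose upper 32 bits are nanoseconds (hence the "(in_tstamp >> 8) & mask" in ice_ptp_extend_40b_ts(), with the valid flag checked separately in the dropped low byte), and those 32 bits are widened against the cached PHC time. A simplified, self-contained sketch of that rollover-aware widening; it mirrors the intent of ice_ptp_extend_32b_ts(), whose actual body appears earlier in this file and is not reproduced here:

/* Sketch: extend 32 captured nanosecond bits to 64 bits using a recent
 * cached PHC read, tolerating one 32-bit rollover in either direction.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t extend_32b_ts(uint64_t cached_phc_time, uint32_t in_tstamp)
{
    uint32_t phc_lo = (uint32_t)cached_phc_time;
    uint32_t delta = in_tstamp - phc_lo;

    /* A "huge" forward delta really means the capture happened slightly
     * before the cached read, possibly across a 32-bit rollover.
     */
    if (delta > (UINT32_MAX / 2))
        return cached_phc_time - (uint32_t)(phc_lo - in_tstamp);

    return cached_phc_time + delta;
}

int main(void)
{
    /* Capture taken just before the PHC low word wrapped past 0x200000000 */
    uint64_t ns = extend_32b_ts(0x200000010ULL, 0xFFFFFFF0U);

    printf("extended: 0x%llx\n", (unsigned long long)ns); /* 0x1fffffff0 */
    return 0;
}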
+ */ + spin_lock(&tx->lock); + if (!bitmap_empty(tx->in_use, tx->len)) + ts_handled = false; + spin_unlock(&tx->lock); - val = (u32)offset; - status = ice_write_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L, val); - if (status) - goto exit; - val = (u32)(offset >> 32); - status = ice_write_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_U, val); - if (status) - goto exit; - - status = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1); - if (status) - goto exit; - - atomic_set(&ptp_port->tx_offset_ready, 1); -exit: - if (status) - dev_err(ice_pf_to_dev(pf), - "PTP tx offset configuration failed on port %d status=%s\n", - port, ice_stat_str(status)); - return ice_status_to_errno(status); + return ts_handled; } /** - * ice_ptp_calc_pmd_adj - Calculate PMD adjustment using integers - * @cur_freq: PHY clock frequency - * @clk_incval: Source clock incval - * @calc_numerator: Value to divide - * @calc_denominator: Remainder of the division + * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps + * @tx: Tx tracking structure to initialize * - * This is the integer math calculation which attempts to avoid overflowing - * a u64. The division (in this case 1/25.78125e9) is split into two parts 125 - * and the remainder, which is the stored in calc_denominator. + * Assumes that the length has already been initialized. Do not call directly, + * use the ice_ptp_init_tx_* instead. */ -static u64 -ice_ptp_calc_pmd_adj(u64 cur_freq, u64 clk_incval, u64 calc_numerator, - u64 calc_denominator) +static int +ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx) { - u64 pmd_adj = calc_numerator; + unsigned long *in_use, *unread; + struct ice_tx_tstamp *tstamps; - pmd_adj *= cur_freq; - pmd_adj /= 125; - pmd_adj *= clk_incval; - pmd_adj /= calc_denominator; - return pmd_adj; -} + tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL); + in_use = bitmap_zalloc(tx->len, GFP_KERNEL); + unread = bitmap_zalloc(tx->len, GFP_KERNEL); -/** - * ice_ptp_get_pmd_adj - Calculate total PMD adjustment - * @pf: Board private struct - * @port: Port we are configuring PHY for - * @cur_freq: PHY clock frequency - * @link_spd: PHY link speed - * @clk_incval: source clock incval - * @mode: FEC mode - * @pmd_adj: PMD adjustment to be calculated - */ -static int ice_ptp_get_pmd_adj(struct ice_pf *pf, int port, u64 cur_freq, - enum ice_ptp_link_spd link_spd, u64 clk_incval, - enum ice_ptp_fec_mode mode, u64 *pmd_adj) -{ - u64 calc_numerator, calc_denominator; - struct ice_hw *hw = &pf->hw; - enum ice_status status; - u32 val; - u8 pmd; + if (!tstamps || !in_use || !unread) { + kfree(tstamps); + bitmap_free(in_use); + bitmap_free(unread); - status = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val); - if (status) - return -EIO; - - pmd = (u8)val; - - /* RS mode overrides all the other pmd_alignment calculations. 
*/ - if (link_spd == ICE_PTP_LNK_SPD_25G_RS || - link_spd == ICE_PTP_LNK_SPD_50G_RS || - link_spd == ICE_PTP_LNK_SPD_100G_RS) { - u64 pmd_cycle_adj = 0; - u8 rx_cycle; - - if (link_spd == ICE_PTP_LNK_SPD_50G || - link_spd == ICE_PTP_LNK_SPD_50G_RS) { - ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT, - &val); - rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M; - } else { - ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT, - &val); - rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M; - } - calc_numerator = pmd; - if (pmd < 17) - calc_numerator += 40; - calc_denominator = 206250000; - - *pmd_adj = ice_ptp_calc_pmd_adj(cur_freq, clk_incval, - calc_numerator, - calc_denominator); - - if (rx_cycle != 0) { - if (link_spd == ICE_PTP_LNK_SPD_25G_RS) - calc_numerator = 4 - rx_cycle; - else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) - calc_numerator = rx_cycle; - else - calc_numerator = 0; - calc_numerator *= 40; - pmd_cycle_adj = ice_ptp_calc_pmd_adj(cur_freq, - clk_incval, - calc_numerator, - calc_denominator); - } - *pmd_adj += pmd_cycle_adj; - } else { - calc_numerator = 0; - calc_denominator = 1; - if (link_spd == ICE_PTP_LNK_SPD_1G) { - if (pmd == 4) - calc_numerator = 10; - else - calc_numerator = (pmd + 6) % 10; - calc_denominator = 10000000; - } else if (link_spd == ICE_PTP_LNK_SPD_10G || - link_spd == ICE_PTP_LNK_SPD_40G) { - if (pmd != 65 || mode == ICE_PTP_FEC_MODE_CLAUSE74) { - calc_numerator = pmd; - calc_denominator = 82500000; - } - } else if (link_spd == ICE_PTP_LNK_SPD_25G) { - if (pmd != 65 || mode == ICE_PTP_FEC_MODE_CLAUSE74) { - calc_numerator = pmd; - calc_denominator = 206250000; - } - } else if (link_spd == ICE_PTP_LNK_SPD_50G) { - if (pmd != 65 || mode == ICE_PTP_FEC_MODE_CLAUSE74) { - calc_numerator = pmd * 2; - calc_denominator = 206250000; - } - } - *pmd_adj = ice_ptp_calc_pmd_adj(cur_freq, clk_incval, - calc_numerator, - calc_denominator); + return -ENOMEM; } + tx->tstamps = tstamps; + tx->in_use = in_use; + tx->unread = unread; + + spin_lock_init(&tx->lock); + + tx->init = 1; + return 0; } -/* Values of rx_offset_delay in units of 1/100th of a nanosecond */ -static const u64 rx_offset_delay[NUM_ICE_PTP_LNK_SPD] = { - 17372, /* 1G */ - 6212, /* 10G */ - 2491, /* 25G */ - 29535, /* 25G RS */ - 4244, /* 40G */ - 2868, /* 50G */ - 14524, /* 50G RS */ - 7775, /* 100G RS */ -}; - /** - * ice_ptp_port_phy_set_rx_offset - Set PHY clock Tx timestamp offset - * @ptp_port: PTP port we are configuring PHY for - */ -static int ice_ptp_port_phy_set_rx_offset(struct ice_ptp_port *ptp_port) -{ - u64 cur_freq, clk_incval, offset, pmd_adj; - enum ice_ptp_link_spd link_spd; - enum ice_ptp_fec_mode fec_mode; - enum ice_status status; - struct ice_pf *pf; - struct ice_hw *hw; - int err, port; - u32 val; - - pf = ptp_port_to_pf(ptp_port); - port = ptp_port->port_num; - hw = &pf->hw; - - /* Get the PTP HW lock */ - if (!ice_ptp_lock(hw)) { - err = -EBUSY; - goto exit; - } - - clk_incval = ice_ptp_read_src_incval(hw); - ice_ptp_unlock(hw); - - cur_freq = ice_e822_pll_freq(pf->ptp.time_ref_freq); - - status = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); - if (status) { - err = ice_status_to_errno(status); - goto exit; - } - - offset = cur_freq * clk_incval; - offset /= 10000; - offset *= rx_offset_delay[link_spd]; - offset /= 10000000; - - status = ice_read_phy_reg_e822(hw, port, P_REG_PAR_PCS_RX_OFFSET_L, - &val); - if (status) { - err = ice_status_to_errno(status); - goto exit; - } - offset += val; - status = ice_read_phy_reg_e822(hw, port, 
P_REG_PAR_PCS_RX_OFFSET_U, - &val); - if (status) { - err = ice_status_to_errno(status); - goto exit; - } - offset += (u64)val << 32; - - if (link_spd == ICE_PTP_LNK_SPD_40G || - link_spd == ICE_PTP_LNK_SPD_50G || - link_spd == ICE_PTP_LNK_SPD_50G_RS || - link_spd == ICE_PTP_LNK_SPD_100G_RS) { - status = ice_read_phy_reg_e822(hw, port, P_REG_PAR_RX_TIME_L, - &val); - if (status) { - err = ice_status_to_errno(status); - goto exit; - } - offset += val; - status = ice_read_phy_reg_e822(hw, port, P_REG_PAR_RX_TIME_U, - &val); - if (status) { - err = ice_status_to_errno(status); - goto exit; - } - offset += (u64)val << 32; - } - - err = ice_ptp_get_pmd_adj(pf, port, cur_freq, link_spd, clk_incval, - fec_mode, &pmd_adj); - if (err) - goto exit; - - if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC) - offset += pmd_adj; - else - offset -= pmd_adj; - - val = (u32)offset; - status = ice_write_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L, val); - if (status) { - err = ice_status_to_errno(status); - goto exit; - } - val = (u32)(offset >> 32); - status = ice_write_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_U, val); - if (status) { - err = ice_status_to_errno(status); - goto exit; - } - - status = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1); - if (status) { - err = ice_status_to_errno(status); - goto exit; - } - - atomic_set(&ptp_port->rx_offset_ready, 1); -exit: - if (err) - dev_err(ice_pf_to_dev(pf), - "PTP rx offset configuration failed on port %d, err=%d\n", - port, err); - return err; -} - -/** - * ice_ptp_port_sync_src_timer - Sync PHY timer with source timer + * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker * @pf: Board private structure - * @port: Port for which the PHY start is set - * - * Sync PHY timer with source timer after calculating and setting Tx/Rx - * Vernier offset. + * @tx: the tracker to flush */ -static enum ice_status ice_ptp_port_sync_src_timer(struct ice_pf *pf, int port) +static void +ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) { - u64 src_time = 0x0, tx_time, rx_time, temp_adj; - struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; - s64 time_adj; - u32 zo, lo; - u8 tmr_idx; + u64 tstamp_ready; + int err; + u8 idx; - /* Get the PTP HW lock */ - if (!ice_ptp_lock(hw)) { - dev_err(dev, "PTP failed to acquire semaphore\n"); - return ICE_ERR_NOT_READY; + err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); + if (err) { + dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n", + tx->block, err); + + /* If we fail to read the Tx timestamp ready bitmap just + * skip clearing the PHY timestamps. + */ + tstamp_ready = 0; } - /* Program cmd to source timer */ - ice_ptp_src_cmd(hw, READ_TIME); + for_each_set_bit(idx, tx->in_use, tx->len) { + u8 phy_idx = idx + tx->offset; + struct sk_buff *skb; + bool unread; - /* Program cmd to PHY port */ - status = ice_ptp_one_port_cmd(hw, port, READ_TIME, true); - if (status) - goto unlock; + spin_lock(&tx->lock); + unread = test_and_clear_bit(idx, tx->unread); + spin_unlock(&tx->lock); - /* Issue sync to activate commands */ - wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); + /* If another thread has already initiated a Tx timestamp + * read, then we do not need to process it here. That thread + * will be responsible for reading the Tx timestamp and + * clearing the in_use bit appropriately. 
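Both consumers of the tracker, the interrupt-driven timestamp work and this flush path, follow the same ownership rule: whichever context manages to clear the unread bit for a slot becomes responsible for reading or discarding that timestamp and for clearing in_use afterwards. A minimal sketch of that claim idiom, using a hypothetical claim_ts_slot() helper rather than any function from this patch:

#include <linux/bitops.h>
#include <linux/spinlock.h>

/* Sketch only. Returns true if this caller won responsibility for slot idx;
 * exactly one of the racing contexts sees true. The lock stands in for
 * tx->lock, which the driver also uses to serialize skb and in_use updates.
 */
static bool claim_ts_slot(spinlock_t *lock, unsigned long *unread, u8 idx)
{
	bool won;

	spin_lock(lock);
	won = test_and_clear_bit(idx, unread);
	spin_unlock(lock);

	return won;
}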
+ */ + if (!unread) + continue; - tmr_idx = ice_get_ptp_src_clock_index(hw); + /* In case this timestamp is ready, we need to clear it. */ + if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx))) + ice_clear_phy_tstamp(hw, tx->block, phy_idx); - /* Read source timer SHTIME_0 and SHTIME_L */ - zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); - lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); - src_time |= (u64)lo; - src_time = (src_time << 32) | (u64)zo; + spin_lock(&tx->lock); + skb = tx->tstamps[idx].skb; + tx->tstamps[idx].skb = NULL; + clear_bit(idx, tx->in_use); + spin_unlock(&tx->lock); - /* Read Tx and Rx capture from PHY */ - status = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time); - if (status) - goto unlock; + /* Count the number of Tx timestamps flushed */ + pf->ptp.tx_hwtstamp_flushed++; - if (tx_time != rx_time) - dev_info(dev, "Port %d Rx and Tx times do not match\n", port); - - /* Calculate amount to adjust port timer and account for case where - * delta is larger/smaller than S64_MAX/S64_MIN - */ - if (src_time > tx_time) { - temp_adj = src_time - tx_time; - if (temp_adj & BIT_ULL(63)) { - time_adj = temp_adj >> 1; - } else { - time_adj = temp_adj; - /* Set to zero to indicate adjustment done */ - temp_adj = 0x0; - } - } else { - temp_adj = tx_time - src_time; - if (temp_adj & BIT_ULL(63)) { - time_adj = -(temp_adj >> 1); - } else { - time_adj = -temp_adj; - /* Set to zero to indicate adjustment done */ - temp_adj = 0x0; - } + /* Free the SKB after we've cleared the bit */ + dev_kfree_skb_any(skb); } - - status = ice_ptp_prep_port_adj_e822(hw, port, time_adj, true); - if (status) - goto unlock; - - status = ice_ptp_one_port_cmd(hw, port, ADJ_TIME, true); - if (status) - goto unlock; - - /* Issue sync to activate commands */ - wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); - - /* Do a second adjustment if original was too large/small to fit into - * a S64 - */ - if (temp_adj) { - status = ice_ptp_prep_port_adj_e822(hw, port, time_adj, true); - if (status) - goto unlock; - - status = ice_ptp_one_port_cmd(hw, port, ADJ_TIME, true); - if (!status) - /* Issue sync to activate commands */ - wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); - } - - /* This second register read is to flush out the port and source - * command registers. Multiple successive calls to this function - * require this - */ - - /* Program cmd to source timer */ - ice_ptp_src_cmd(hw, READ_TIME); - - /* Program cmd to PHY port */ - status = ice_ptp_one_port_cmd(hw, port, READ_TIME, true); - if (status) - goto unlock; - - /* Issue sync to activate commands */ - wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); - - /* Read source timer SHTIME_0 and SHTIME_L */ - zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); - lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); - src_time = (u64)lo; - src_time = (src_time << 32) | (u64)zo; - - /* Read Tx and Rx capture from PHY */ - status = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time); - - if (status) - goto unlock; - dev_info(dev, "Port %d PTP synced to source 0x%016llX, 0x%016llX\n", - port, src_time, tx_time); -unlock: - ice_ptp_unlock(hw); - - if (status) - dev_err(dev, "PTP failed to sync port %d PHY time, status %s\n", - port, ice_stat_str(status)); - - return status; } /** - * ice_ptp_read_time - Read the time from the device + * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker * @pf: Board private structure - * @ts: timespec structure to hold the current time value - * @sts: Optional parameter for holding a pair of system timestamps from - * the system clock. 
Will be ignored if NULL is given. + * @tx: Tx tracking structure to release * - * This function reads the source clock registers and stores them in a timespec. - * However, since the registers are 64 bits of nanoseconds, we must convert the - * result to a timespec before we can return. + * Free memory associated with the Tx timestamp tracker. */ -static void ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts, - struct ptp_system_timestamp *sts) +static void +ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) { - u64 time_ns; + tx->init = 0; - if (pf->ptp.src_tmr_mode != ICE_SRC_TMR_MODE_NANOSECONDS) { - dev_err(ice_pf_to_dev(pf), - "PTP Locked mode is not supported!\n"); - return; + ice_ptp_flush_tx_tracker(pf, tx); + + kfree(tx->tstamps); + tx->tstamps = NULL; + + bitmap_free(tx->in_use); + tx->in_use = NULL; + + bitmap_free(tx->unread); + tx->unread = NULL; + + tx->len = 0; +} + +/** + * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps + * @pf: Board private structure + * @tx: the Tx tracking structure to initialize + * @port: the port this structure tracks + * + * Initialize the Tx timestamp tracker for this port. For generic MAC devices, + * the timestamp block is shared for all ports in the same quad. To avoid + * ports using the same timestamp index, logically break the block of + * registers into chunks based on the port number. + */ +static int +ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) +{ + tx->block = port / ICE_PORTS_PER_QUAD; + tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822; + tx->len = INDEX_PER_PORT_E822; + + return ice_ptp_alloc_tx_tracker(tx); +} + +/** + * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps + * @pf: Board private structure + * @tx: the Tx tracking structure to initialize + * + * Initialize the Tx timestamp tracker for this PF. For E810 devices, each + * port has its own block of timestamps, independent of the other ports. + */ +static int +ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx) +{ + tx->block = pf->hw.port_info->lport; + tx->offset = 0; + tx->len = INDEX_PER_PORT_E810; + + if (pf->hw.dev_caps.ts_dev_info.ts_ll_read) + tx->ll_ena = 1; + + return ice_ptp_alloc_tx_tracker(tx); +} + +/** + * ice_ptp_update_cached_phctime - Update the cached PHC time values + * @pf: Board specific private structure + * + * This function updates the system time values which are cached in the PF + * structure and the Rx rings. + * + * This function must be called periodically to ensure that the cached value + * is never more than 2 seconds old. + * + * Note that the cached copy in the PF PTP structure is always updated, even + * if we can't update the copy in the Rx rings. 
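One way to read the 2 second requirement: the hardware hands the driver only 32 bits of nanoseconds, which wrap every 2^32 ns = 4,294,967,296 ns, roughly 4.295 s. Extension picks whichever full 64-bit time lies closest to the cached PHC value, so the cached copy has to stay within half a wrap period, about 2.15 s, of the true time for that choice to be unambiguous; refreshing it at least every 2 s keeps it safely inside that window.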
+ * + * Return: + * * 0 - OK, successfully updated + * * -EAGAIN - PF was busy, need to reschedule the update + */ +static int ice_ptp_update_cached_phctime(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + unsigned long update_before; + u64 systime; + int i; + + update_before = pf->ptp.cached_phc_jiffies + 2 * HZ; + if (pf->ptp.cached_phc_time && + time_is_before_jiffies(update_before)) { + unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies; + + dev_warn(dev, "%u msecs passed between update to cached PHC time\n", + jiffies_to_msecs(time_taken)); + pf->ptp.late_cached_phc_updates++; } - time_ns = ice_ptp_read_src_clk_reg(pf, sts); - *ts = ns_to_timespec64(time_ns); + /* Read the current PHC time */ + systime = ice_ptp_read_src_clk_reg(pf, NULL); + + /* Update the cached PHC time stored in the PF structure */ + WRITE_ONCE(pf->ptp.cached_phc_time, systime); + WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies); + + if (test_and_set_bit(ICE_CFG_BUSY, pf->state)) + return -EAGAIN; + + ice_for_each_vsi(pf, i) { + struct ice_vsi *vsi = pf->vsi[i]; + int j; + + if (!vsi) + continue; + + if (vsi->type != ICE_VSI_PF) + continue; + if (!vsi->rx_rings) + continue; + + ice_for_each_rxq(vsi, j) { + if (!vsi->rx_rings[j]) + continue; + WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime); + } + } + + clear_bit(ICE_CFG_BUSY, pf->state); + + return 0; +} + +/** + * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update + * @pf: Board specific private structure + * + * This function must be called when the cached PHC time is no longer valid, + * such as after a time adjustment. It discards any outstanding Tx timestamps, + * and updates the cached PHC time for both the PF and Rx rings. If updating + * the PHC time cannot be done immediately, a warning message is logged and + * the work item is scheduled. + * + * These steps are required in order to ensure that we do not accidentally + * report a timestamp extended by the wrong PHC cached copy. Note that we + * do not directly update the cached timestamp here because it is possible + * this might produce an error when ICE_CFG_BUSY is set. If this occurred, we + * would have to try again. During that time window, timestamps might be + * requested and returned with an invalid extension. Thus, on failure to + * immediately update the cached PHC time we would need to zero the value + * anyways. For this reason, we just zero the value immediately and queue the + * update work item. + */ +static void ice_ptp_reset_cached_phctime(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + int err; + + /* Update the cached PHC time immediately if possible, otherwise + * schedule the work item to execute soon. + */ + err = ice_ptp_update_cached_phctime(pf); + if (err) { + /* If another thread is updating the Rx rings, we won't + * properly reset them here. This could lead to reporting of + * invalid timestamps, but there isn't much we can do. 
+ */ + dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n", + __func__); + + /* Queue the work item to update the Rx rings when possible */ + kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, + msecs_to_jiffies(10)); + } + + /* Flush any outstanding Tx timestamps */ + ice_ptp_flush_tx_tracker(pf, &pf->ptp.port.tx); } /** @@ -1487,21 +2016,16 @@ static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts) { u64 ns = timespec64_to_ns(ts); struct ice_hw *hw = &pf->hw; - enum ice_status status; u64 val; if (pf->ptp.src_tmr_mode != ICE_SRC_TMR_MODE_NANOSECONDS) { dev_err(ice_pf_to_dev(pf), "PTP Locked mode is not supported!\n"); - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; } val = ns; - status = ice_ptp_init_time(hw, val); - if (status) - return ice_status_to_errno(status); - - return 0; + return ice_ptp_init_time(hw, val); } /** @@ -1518,17 +2042,10 @@ static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj, bool lock_sbq) { struct ice_hw *hw = &pf->hw; - enum ice_status status; - - status = ice_ptp_adj_clock(hw, adj, lock_sbq); - if (status) - return ice_status_to_errno(status); - - return 0; + return ice_ptp_adj_clock(hw, adj, lock_sbq); } - /** * ice_ptp_get_incval - Get clock increment params * @pf: Board private structure @@ -1538,7 +2055,7 @@ ice_ptp_write_adj(struct ice_pf *pf, s32 adj, bool lock_sbq) int ice_ptp_get_incval(struct ice_pf *pf, enum ice_time_ref_freq *time_ref_freq, enum ice_src_tmr_mode *src_tmr_mode) { - *time_ref_freq = pf->ptp.time_ref_freq; + *time_ref_freq = ice_e822_time_ref(&pf->hw); *src_tmr_mode = pf->ptp.src_tmr_mode; return 0; @@ -1555,12 +2072,13 @@ int ice_ptp_get_incval(struct ice_pf *pf, enum ice_time_ref_freq *time_ref_freq, */ static u64 ice_base_incval(struct ice_pf *pf) { + struct ice_hw *hw = &pf->hw; u64 incval; - if (ice_is_e810(&pf->hw)) + if (ice_is_e810(hw)) incval = ICE_PTP_NOMINAL_INCVAL_E810; - else if (pf->ptp.time_ref_freq < NUM_ICE_TIME_REF_FREQ) - incval = ice_e822_nominal_incval(pf->ptp.time_ref_freq); + else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) + incval = ice_e822_nominal_incval(ice_e822_time_ref(hw)); else incval = LOCKED_INCVAL_E822; @@ -1570,19 +2088,6 @@ static u64 ice_base_incval(struct ice_pf *pf) return incval; } -/** - * ice_ptp_reset_ts_memory_quad - Reset timestamp memory for one quad - * @pf: The PF private data structure - * @quad: The quad (0-4) - */ -static void ice_ptp_reset_ts_memory_quad(struct ice_pf *pf, int quad) -{ - struct ice_hw *hw = &pf->hw; - - ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); - ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); -} - /** * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state * @port: PTP port for which Tx FIFO is checked @@ -1591,30 +2096,29 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) { int quad = port->port_num / ICE_PORTS_PER_QUAD; int offs = port->port_num % ICE_PORTS_PER_QUAD; - enum ice_status status; struct ice_pf *pf; struct ice_hw *hw; u32 val, phy_sts; + int err; pf = ptp_port_to_pf(port); hw = &pf->hw; - if (port->tx_fifo_busy_cnt == FIFO_OK) return 0; /* need to read FIFO state */ if (offs == 0 || offs == 1) - status = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS, - &val); + err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS, + &val); else - status = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS, - &val); + err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS, + &val); - if (status) { - 
dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, status %s\n", - port->port_num, ice_stat_str(status)); - return ice_status_to_errno(status); + if (err) { + dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, status %d\n", + port->port_num, err); + return err; } if (offs & 0x1) @@ -1636,7 +2140,7 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) dev_dbg(ice_pf_to_dev(pf), "Port %d Tx FIFO still not empty; resetting quad %d\n", port->port_num, quad); - ice_ptp_reset_ts_memory_quad(pf, quad); + ice_ptp_reset_ts_memory_quad_e822(hw, quad); port->tx_fifo_busy_cnt = FIFO_OK; return 0; } @@ -1657,50 +2161,25 @@ static int ice_ptp_check_tx_offset_valid(struct ice_ptp_port *port) struct ice_pf *pf = ptp_port_to_pf(port); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; u32 val; int err; - /* Check if the offset is already valid */ - if (atomic_read(&port->tx_offset_ready)) - return 0; - - /* Take the bit lock to prevent cross thread interaction */ - if (atomic_cmpxchg(&port->tx_offset_lock, false, true)) - return -EBUSY; - err = ice_ptp_check_tx_fifo(port); if (err) - goto out_unlock; + return err; - status = ice_read_phy_reg_e822(hw, port->port_num, P_REG_TX_OV_STATUS, - &val); - if (status) { - dev_err(dev, "Failed to read TX_OV_STATUS for port %d, status %s\n", - port->port_num, ice_stat_str(status)); - err = -EAGAIN; - goto out_unlock; - } - - if (!(val & P_REG_TX_OV_STATUS_OV_M)) { - err = -EAGAIN; - goto out_unlock; - } - - err = ice_ptp_port_phy_set_tx_offset(port); + err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_TX_OV_STATUS, + &val); if (err) { - dev_err(dev, "Failed to set PHY Rx offset for port %d, err %d\n", + dev_err(dev, "Failed to read TX_OV_STATUS for port %d, status %d\n", port->port_num, err); - goto out_unlock; + return -EAGAIN; } - dev_info(dev, "Port %d Tx calibration complete\n", port->port_num); + if (!(val & P_REG_TX_OV_STATUS_OV_M)) + return -EAGAIN; - -out_unlock: - atomic_set(&port->tx_offset_lock, false); - - return err; + return 0; } /** @@ -1716,45 +2195,21 @@ static int ice_ptp_check_rx_offset_valid(struct ice_ptp_port *port) struct ice_pf *pf = ptp_port_to_pf(port); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; - u32 val; int err; + u32 val; - /* Check if the offset is already valid */ - if (atomic_read(&port->rx_offset_ready)) - return 0; - - /* Take the bit lock to prevent cross thread interaction */ - if (atomic_cmpxchg(&port->rx_offset_lock, false, true)) - return -EBUSY; - - status = ice_read_phy_reg_e822(hw, port->port_num, P_REG_RX_OV_STATUS, - &val); - if (status) { - dev_err(dev, "Failed to read RX_OV_STATUS for port %d, status %s\n", - port->port_num, ice_stat_str(status)); - err = ice_status_to_errno(status); - goto out_unlock; - } - - if (!(val & P_REG_RX_OV_STATUS_OV_M)) { - err = -EAGAIN; - goto out_unlock; - } - - err = ice_ptp_port_phy_set_rx_offset(port); + err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_RX_OV_STATUS, + &val); if (err) { - dev_err(dev, "Failed to set PHY Rx offset for port %d, err %d\n", + dev_err(dev, "Failed to read RX_OV_STATUS for port %d, status %d\n", port->port_num, err); - goto out_unlock; + return err; } - dev_info(dev, "Port %d Rx calibration complete\n", port->port_num); + if (!(val & P_REG_RX_OV_STATUS_OV_M)) + return -EAGAIN; -out_unlock: - atomic_set(&port->rx_offset_lock, false); - - return err; + return 0; } /** @@ -1779,171 +2234,165 @@ static int 
ice_ptp_check_offset_valid(struct ice_ptp_port *port) } /** - * ice_ptp_wait_for_offset_valid - Poll offset valid reg until set or timeout - * @work: Pointer to struct work_struct + * ice_ptp_wait_for_offset_valid - Check for valid Tx and Rx offsets + * @work: Pointer to the kthread_work structure for this task + * + * Check whether both the Tx and Rx offsets are valid for enabling the vernier + * calibration. + * + * Once we have valid offsets from hardware, update the total Tx and Rx + * offsets, and exit bypass mode. This enables more precise timestamps using + * the extra data measured during the vernier calibration process. */ -static void ice_ptp_wait_for_offset_valid(struct work_struct *work) +static void ice_ptp_wait_for_offset_valid(struct kthread_work *work) { struct ice_ptp_port *port; + struct device *dev; struct ice_pf *pf; - int i; + struct ice_hw *hw; + int err; - port = container_of(work, struct ice_ptp_port, ov_task); + port = container_of(work, struct ice_ptp_port, ov_work.work); pf = ptp_port_to_pf(port); + hw = &pf->hw; + dev = ice_pf_to_dev(pf); -#define OV_POLL_PERIOD_MS 10 -#define OV_POLL_ATTEMPTS 20 - for (i = 0; i < OV_POLL_ATTEMPTS; i++) { - if (atomic_read(&pf->ptp.phy_reset_lock)) - return; + if (ice_is_reset_in_progress(pf->state)) + return; - if (!ice_ptp_check_offset_valid(port)) - return; - - msleep(OV_POLL_PERIOD_MS); + if (ice_ptp_check_offset_valid(port)) { + /* Offsets not ready yet, try again later */ + kthread_queue_delayed_work(pf->ptp.kworker, + &port->ov_work, + msecs_to_jiffies(100)); + return; } + + /* Offsets are valid, so Vernier mode calculations are started */ + err = ice_phy_calc_vernier_e822(hw, port->port_num); + if (err) { + dev_warn(dev, "Failed to prepare Vernier mode for PHY port %u, status %d\n", + port->port_num, err); + return; + } + } /** - * ice_ptp_port_phy_start - Set or clear PHY start for port timestamping - * @ptp_port: PTP port for which the PHY start is set - * @phy_start: Value to be set + * ice_ptp_port_phy_stop - Stop timestamping for a PHY port + * @ptp_port: PTP port to stop */ -static int -ice_ptp_port_phy_start(struct ice_ptp_port *ptp_port, bool phy_start) +static int ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) { struct ice_pf *pf = ptp_port_to_pf(ptp_port); u8 port = ptp_port->port_num; struct ice_hw *hw = &pf->hw; - enum ice_status status; - u32 val; + int err; + + if (ice_is_e810(hw)) + return 0; mutex_lock(&ptp_port->ps_lock); - atomic_set(&ptp_port->tx_offset_ready, 0); - atomic_set(&ptp_port->rx_offset_ready, 0); - ptp_port->tx_fifo_busy_cnt = 0; + kthread_cancel_delayed_work_sync(&ptp_port->ov_work); - status = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0); - if (status) - goto out_unlock; - - status = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0); - if (status) - goto out_unlock; - - status = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); - if (status) - goto out_unlock; - - val &= ~P_REG_PS_START_M; - status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); - if (status) - goto out_unlock; - - val &= ~P_REG_PS_ENA_CLK_M; - status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); - if (status) - goto out_unlock; - - - if (phy_start && ptp_port->link_up) { - ice_phy_cfg_lane_e822(hw, port); - ice_ptp_port_phy_set_parpcs_incval(pf, port); - - status = ice_ptp_write_incval_locked(hw, ice_base_incval(pf)); - if (status) - goto out_unlock; - - - status = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); - if (status) - goto out_unlock; - - val |= P_REG_PS_SFT_RESET_M; - status = 
ice_write_phy_reg_e822(hw, port, P_REG_PS, val); - if (status) - goto out_unlock; - - val |= P_REG_PS_START_M; - status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); - if (status) - goto out_unlock; - - val &= ~P_REG_PS_SFT_RESET_M; - status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); - if (status) - goto out_unlock; - - status = ice_ptp_write_incval_locked(hw, ice_base_incval(pf)); - if (status) - goto out_unlock; - - val |= P_REG_PS_ENA_CLK_M; - status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); - if (status) - goto out_unlock; - - val |= P_REG_PS_LOAD_OFFSET_M; - status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); - if (status) - goto out_unlock; - - wr32(&pf->hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); - status = ice_ptp_port_sync_src_timer(pf, port); - if (status) - goto out_unlock; - - queue_work(pf->ptp.ov_wq, &ptp_port->ov_task); - } - -out_unlock: - if (status) - dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d %s, status=%s\n", - port, phy_start ? "up" : "down", ice_stat_str(status)); + err = ice_stop_phy_timer_e822(hw, port, true); + if (err && err != -EBUSY) + dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, status=%d\n", + port, err); mutex_unlock(&ptp_port->ps_lock); - return ice_status_to_errno(status); + return err; } /** - * ice_ptp_link_change - Set or clear port registers for timestamping + * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping + * @ptp_port: PTP port for which the PHY start is set + * + * Start the PHY timestamping block, and initiate Vernier timestamping + * calibration. If timestamping cannot be calibrated (such as if link is down) + * then disable the timestamping block instead. + */ +static int ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) +{ + struct ice_pf *pf = ptp_port_to_pf(ptp_port); + u8 port = ptp_port->port_num; + struct ice_hw *hw = &pf->hw; + int err; + + if (ice_is_e810(hw)) + return 0; + + if (!ptp_port->link_up) + return ice_ptp_port_phy_stop(ptp_port); + + mutex_lock(&ptp_port->ps_lock); + + /* Start the PHY timer in Vernier mode */ + kthread_cancel_delayed_work_sync(&ptp_port->ov_work); + + /* temporarily disable Tx timestamps while calibrating PHY offset */ + ptp_port->tx.calibrating = true; + ptp_port->tx_fifo_busy_cnt = 0; + + /* Start the PHY timer in Vernier mode */ + err = ice_start_phy_timer_e822(hw, port); + if (err) + goto out_unlock; + + /* Enable Tx timestamps right away */ + ptp_port->tx.calibrating = false; + + kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); + +out_unlock: + if (err) + dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, status=%d\n", + port, err); + + mutex_unlock(&ptp_port->ps_lock); + + return err; +} + +/** + * ice_ptp_link_change - Reconfigure PTP after link status change * @pf: Board private structure * @port: Port for which the PHY start is set * @linkup: Link is up or down */ -int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) +void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) { - /* If PTP is not supported on this function, nothing to do */ - if (!test_bit(ICE_FLAG_PTP_ENA, pf->flags)) - return 0; + struct ice_ptp_port *ptp_port; - if (linkup && !test_bit(ICE_FLAG_PTP, pf->flags)) { - dev_err(ice_pf_to_dev(pf), "PTP not ready, failed to prepare port %d\n", - port); - return -EAGAIN; - } + if (!test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) + return; - if (port >= ICE_NUM_EXTERNAL_PORTS) - return -EINVAL; + if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS)) + return; - 
pf->ptp.port.link_up = linkup; + ptp_port = &pf->ptp.port; + if (WARN_ON_ONCE(ptp_port->port_num != port)) + return; - return ice_ptp_port_phy_start(&pf->ptp.port, linkup); -} + /* Update cached link status for this port immediately */ + ptp_port->link_up = linkup; + if (!test_bit(ICE_FLAG_PTP, pf->flags)) + return; -/** - * ice_ptp_reset_ts_memory - Reset timestamp memory for all quads - * @pf: The PF private data structure - */ -static void ice_ptp_reset_ts_memory(struct ice_pf *pf) -{ - int quad; + /* Upon link up, flush any timestamps which weren't processed before + * link changed + */ + if (linkup) + ice_ptp_flush_tx_tracker(pf, &ptp_port->tx); - quad = pf->hw.port_info->lport / ICE_PORTS_PER_QUAD; - ice_ptp_reset_ts_memory_quad(pf, quad); + /* E810 devices do not need to reconfigure the PHY */ + if (ice_is_e810(&pf->hw)) + return; + + ice_ptp_port_phy_restart(ptp_port); } /** @@ -1956,17 +2405,16 @@ static void ice_ptp_reset_ts_memory(struct ice_pf *pf) */ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) { - enum ice_status status = 0; struct ice_hw *hw = &pf->hw; + int err = 0; int quad; u32 val; - ice_ptp_reset_ts_memory(pf); - + ice_ptp_reset_ts_memory(hw); for (quad = 0; quad < ICE_MAX_QUAD; quad++) { - status = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, - &val); - if (status) + err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, + &val); + if (err) break; if (ena) { @@ -1978,45 +2426,25 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; } - status = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, - val); - if (status) + err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, + val); + if (err) break; } - if (status) - dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, status %s\n", - ice_stat_str(status)); - return ice_status_to_errno(status); + if (err) + dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, status %d\n", + err); + return err; } /** - * ice_ptp_reset_phy_timestamping - Reset PHY timestamp registers values + * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block * @pf: Board private structure */ static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf) { - int i; - -#define PHY_RESET_TRIES 5 -#define PHY_RESET_SLEEP_MS 5 - - for (i = 0; i < PHY_RESET_TRIES; i++) { - if (atomic_cmpxchg(&pf->ptp.phy_reset_lock, false, true)) - goto reset; - - msleep(PHY_RESET_SLEEP_MS); - } - return; - -reset: - flush_workqueue(pf->ptp.ov_wq); - ice_ptp_port_phy_start(&pf->ptp.port, false); - if (pf->ptp.port.link_up) - ice_ptp_port_phy_start(&pf->ptp.port, true); - - ice_ptp_reset_ts_memory(pf); - atomic_set(&pf->ptp.phy_reset_lock, false); + ice_ptp_port_phy_restart(&pf->ptp.port); } /** @@ -2031,7 +2459,6 @@ ice_ptp_update_incval(struct ice_pf *pf, enum ice_time_ref_freq time_ref_freq, { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; struct timespec64 ts; s64 incval; int err; @@ -2053,27 +2480,26 @@ ice_ptp_update_incval(struct ice_pf *pf, enum ice_time_ref_freq time_ref_freq, if (!ice_ptp_lock(hw)) return -EBUSY; - status = ice_ptp_write_incval(hw, incval); - if (status) { - dev_err(dev, "PTP failed to update incval, status %s\n", - ice_stat_str(status)); - err = ice_status_to_errno(status); + err = ice_ptp_write_incval(hw, incval); + if (err) { + dev_err(dev, "PTP failed to update incval, status %d\n", err); goto err_unlock; } - pf->ptp.time_ref_freq = time_ref_freq; + 
ice_set_e822_time_ref(hw, time_ref_freq); pf->ptp.src_tmr_mode = src_tmr_mode; ts = ktime_to_timespec64(ktime_get_real()); err = ice_ptp_write_init(pf, &ts); if (err) { - dev_err(dev, "PTP failed to program time registers, err %d\n", - err); + ice_dev_err_errno(dev, err, + "PTP failed to program time registers"); goto err_unlock; } /* unlock PTP semaphore first before resetting PHY timestamping */ ice_ptp_unlock(hw); + ice_ptp_reset_ts_memory(hw); ice_ptp_reset_phy_timestamping(pf); return 0; @@ -2096,11 +2522,10 @@ err_unlock: static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) { struct ice_pf *pf = ptp_info_to_pf(info); - u64 freq, divisor = 1000000ULL; struct ice_hw *hw = &pf->hw; - enum ice_status status; - s64 incval, diff; + u64 incval, diff; int neg_adj = 0; + int err; if (pf->ptp.src_tmr_mode == ICE_SRC_TMR_MODE_LOCKED) { dev_err(ice_pf_to_dev(pf), @@ -2115,26 +2540,17 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) scaled_ppm = -scaled_ppm; } - while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) { - /* handle overflow by scaling down the scaled_ppm and - * the divisor, losing some precision - */ - scaled_ppm >>= 2; - divisor >>= 2; - } - - freq = (incval * (u64)scaled_ppm) >> 16; - diff = div_u64(freq, divisor); - + diff = mul_u64_u64_div_u64(incval, (u64)scaled_ppm, + 1000000ULL << 16); if (neg_adj) incval -= diff; else incval += diff; - status = ice_ptp_write_incval_locked(hw, incval); - if (status) { - dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, status %s\n", - ice_stat_str(status)); + err = ice_ptp_write_incval_locked(hw, incval); + if (err) { + dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, status %d\n", + err); return -EIO; } @@ -2154,8 +2570,8 @@ static int ice_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb) { struct ice_pf *pf = ptp_info_to_pf(info); struct ice_hw *hw = &pf->hw; - enum ice_status status; - s64 incval, freq, diff; + int status, neg_adj = 0; + u64 incval, diff; if (pf->ptp.src_tmr_mode == ICE_SRC_TMR_MODE_LOCKED) { dev_err(ice_pf_to_dev(pf), @@ -2165,14 +2581,21 @@ static int ice_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb) incval = ice_base_incval(pf); - freq = incval * ppb; - diff = div_s64(freq, 1000000000ULL); - incval += diff; + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + diff = mul_u64_u64_div_u64(incval, (u64)ppb, 1000000000ULL); + if (neg_adj) + incval -= diff; + else + incval += diff; status = ice_ptp_write_incval_locked(hw, incval); if (status) { - dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, status %s\n", - ice_stat_str(status)); + dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, status %d\n", + status); return -EIO; } @@ -2182,12 +2605,14 @@ static int ice_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb) #endif /** * ice_ptp_extts_work - Workqueue task function - * @pf: Board private structure + * @work: external timestamp work structure * * Service for PTP external clock event */ -static void ice_ptp_extts_work(struct ice_pf *pf) +static void ice_ptp_extts_work(struct kthread_work *work) { + struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work); + struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); struct ptp_clock_event event; struct ice_hw *hw = &pf->hw; u8 chan, tmr_idx; @@ -2208,9 +2633,17 @@ static void ice_ptp_extts_work(struct ice_pf *pf) event.type = PTP_CLOCK_EXTTS; event.index = chan; - /* Fire event */ - ptp_clock_event(pf->ptp.clock, &event); pf->ptp.ext_ts_irq &= ~(1 << chan); + + /* Fire event if not filtered by CGU 
state */ + if (ice_is_feature_supported(pf, ICE_F_CGU) && + test_bit(ICE_FLAG_DPLL_MONITOR, pf->flags) && + test_bit(ICE_FLAG_EXTTS_FILTER, pf->flags) && + pf->ptp_dpll_state != ICE_CGU_STATE_LOCKED && + pf->ptp_dpll_state != ICE_CGU_STATE_LOCKED_HO_ACQ) + continue; + + ptp_clock_event(pf->ptp.clock, &event); } } } @@ -2294,8 +2727,8 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, struct ice_perout_channel *config, bool store) { + u64 current_time, period, start_time, phase; struct ice_hw *hw = &pf->hw; - u64 current_time, period, start_time; u32 func, val, gpio_pin; u8 tmr_idx; @@ -2313,7 +2746,7 @@ int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, /* If we're disabling the output, clear out CLKO and TGT and keep * output level low */ - if (!config || !config->ena) { + if (!config || !config->ena || !config->period) { wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0); wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0); wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0); @@ -2330,15 +2763,16 @@ int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, return 0; } period = config->period; - start_time = config->start_time; - gpio_pin = config->gpio_pin; - /* 1. Write clkout with half of required period value */ if (period & 0x1) { dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n"); goto err; } + start_time = config->start_time; + div64_u64_rem(start_time, period, &phase); + gpio_pin = config->gpio_pin; + period >>= 1; /* For proper operation, the GLTSYN_CLKO must be larger than clock tick @@ -2359,13 +2793,13 @@ int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, * maintaining phase */ if (start_time < current_time) - start_time = roundup(current_time + NSEC_PER_MSEC, - NSEC_PER_SEC) + start_time % NSEC_PER_SEC; + start_time = div64_u64(current_time + NSEC_PER_SEC - 1, + NSEC_PER_SEC) * NSEC_PER_SEC + phase; if (ice_is_e810(hw)) start_time -= E810_OUT_PROP_DELAY_NS; else - start_time -= ice_e822_pps_delay(pf->ptp.time_ref_freq); + start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw)); /* 2. Write TARGET time */ wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time)); @@ -2385,7 +2819,7 @@ int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, if (store) { memcpy(&pf->ptp.perout_channels[chan], config, sizeof(struct ice_perout_channel)); - pf->ptp.perout_channels[chan].start_time %= NSEC_PER_SEC; + pf->ptp.perout_channels[chan].start_time = phase; } return 0; @@ -2394,6 +2828,41 @@ err: return -EFAULT; } +/** + * ice_ptp_disable_all_clkout - Disable all currently configured outputs + * @pf: pointer to the PF structure + * + * Disable all currently configured clock outputs. This is necessary before + * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to + * re-enable the clocks again. + */ +static void ice_ptp_disable_all_clkout(struct ice_pf *pf) +{ + int i; + + for (i = 0; i < pf->ptp.info.n_per_out; i++) + if (pf->ptp.perout_channels[i].ena) + ice_ptp_cfg_clkout(pf, i, NULL, false); +} + +/** + * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs + * @pf: pointer to the PF structure + * + * Enable all currently configured clock outputs. Use this after + * ice_ptp_disable_all_clkout to reconfigure the output signals according to + * their configuration. 
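A worked example of the start-time handling in ice_ptp_cfg_clkout above, with illustrative numbers (the 500 ms period is an assumption, not a value from the patch): for config->period = 500,000,000 ns and config->start_time = 1,250,000,000 ns, div64_u64_rem() yields phase = 250,000,000 ns. If the requested start has already passed, say current_time = 7,800,000,001 ns, the code rounds up to the next whole second, 8,000,000,000 ns, and adds the phase, giving 8,250,000,000 ns, so the output stays phase-aligned with the original request; the E810 or E822 propagation delay is then subtracted before the target is written to GLTSYN_TGT_L/H.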
+ */ +static void ice_ptp_enable_all_clkout(struct ice_pf *pf) +{ + int i; + + for (i = 0; i < pf->ptp.info.n_per_out; i++) + if (pf->ptp.perout_channels[i].ena) + ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i], + false); +} + /** * ice_ptp_gettimex64 - Get the time of the clock * @info: the driver's PTP info structure @@ -2409,15 +2878,16 @@ ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts, struct ptp_system_timestamp *sts) { struct ice_pf *pf = ptp_info_to_pf(info); - struct ice_hw *hw = &pf->hw; + u64 time_ns; - if (!ice_ptp_lock(hw)) { - dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n"); - return -EBUSY; + if (pf->ptp.src_tmr_mode != ICE_SRC_TMR_MODE_NANOSECONDS) { + dev_err(ice_pf_to_dev(pf), + "PTP Locked mode is not supported!\n"); + return -EIO; } + time_ns = ice_ptp_read_src_clk_reg(pf, sts); - ice_ptp_read_time(pf, ts, sts); - ice_ptp_unlock(hw); + *ts = ns_to_timespec64(time_ns); return 0; } @@ -2458,6 +2928,7 @@ static int ice_ptp_gettime32(struct ptp_clock_info *info, struct timespec *ts) #endif /* !HAVE_PTP_CLOCK_INFO_GETTIME64 */ #endif /* !HAVE_PTP_CLOCK_INFO_GETTIMEX64 */ + /** * ice_ptp_settime64 - Set the time of the clock * @info: the driver's PTP info structure @@ -2472,14 +2943,13 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) struct ice_pf *pf = ptp_info_to_pf(info); struct timespec64 ts64 = *ts; struct ice_hw *hw = &pf->hw; - u8 i; int err; /* For Vernier mode, we need to recalibrate after new settime * Start with disabling timestamp block */ if (pf->ptp.port.link_up) - ice_ptp_port_phy_start(&pf->ptp.port, false); + ice_ptp_port_phy_stop(&pf->ptp.port); if (!ice_ptp_lock(hw)) { err = -EBUSY; @@ -2487,25 +2957,30 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) } /* Disable periodic outputs */ - for (i = 0; i < info->n_per_out; i++) - if (pf->ptp.perout_channels[i].ena) - ice_ptp_cfg_clkout(pf, i, NULL, false); + ice_ptp_disable_all_clkout(pf); err = ice_ptp_write_init(pf, &ts64); ice_ptp_unlock(hw); if (!err) - ice_ptp_update_cached_systime(pf); + ice_ptp_reset_cached_phctime(pf); /* Reenable periodic outputs */ - for (i = 0; i < info->n_per_out; i++) - if (pf->ptp.perout_channels[i].ena) - ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i], - false); + ice_ptp_enable_all_clkout(pf); /* Recalibrate and re-enable timestamp block */ if (pf->ptp.port.link_up) - ice_ptp_port_phy_start(&pf->ptp.port, true); + ice_ptp_port_phy_restart(&pf->ptp.port); + + /* Recalibrate and re-enable timestamp block for each PTP port */ + pf->phc_recalc++; + err = ice_aq_set_driver_param(hw, ICE_AQC_DRIVER_PARAM_PHC_RECALC, + pf->phc_recalc, NULL); + if (err) { + dev_err(ice_pf_to_dev(pf), + "Failed to force PHC_RECALC, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + } exit: if (err) { dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); @@ -2541,15 +3016,17 @@ ice_ptp_settime32(struct ptp_clock_info *info, const struct timespec *ts) static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) { struct timespec64 now, then; + int ret; then = ns_to_timespec64(delta); - ice_ptp_gettimex64(info, &now, NULL); + ret = ice_ptp_gettimex64(info, &now, NULL); + if (ret) + return ret; now = timespec64_add(now, then); return ice_ptp_settime64(info, (const struct timespec64 *)&now); } - /** * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta * @info: the driver's PTP info structure @@ -2561,7 +3038,6 @@ static int ice_ptp_adjtime(struct 
ptp_clock_info *info, s64 delta) struct ice_hw *hw = &pf->hw; struct device *dev; int err; - u8 i; dev = ice_pf_to_dev(pf); @@ -2585,42 +3061,34 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) } /* Disable periodic outputs */ - for (i = 0; i < info->n_per_out; i++) - if (pf->ptp.perout_channels[i].ena) - ice_ptp_cfg_clkout(pf, i, NULL, false); + ice_ptp_disable_all_clkout(pf); err = ice_ptp_write_adj(pf, delta, true); /* Reenable periodic outputs */ - for (i = 0; i < info->n_per_out; i++) - if (pf->ptp.perout_channels[i].ena) - ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i], - false); + ice_ptp_enable_all_clkout(pf); ice_ptp_unlock(hw); - /* Check error after restarting periodic outputs and releasing the PTP - * hardware lock. - */ if (err) { - dev_err(dev, "PTP failed to adjust time, err %d\n", err); + ice_dev_err_errno(dev, err, "PTP failed to adjust time"); return err; } - ice_ptp_update_cached_systime(pf); + ice_ptp_reset_cached_phctime(pf); return 0; } /** - * ice_ptp_gpio_enable_e822 - Enable/disable ancillary features of PHC + * ice_ptp_gpio_enable_generic - Enable/disable ancillary features of PHC * @info: the driver's PTP info structure * @rq: The requested feature to change * @on: Enable/disable flag */ static int -ice_ptp_gpio_enable_e822(struct ptp_clock_info *info, - struct ptp_clock_request *rq, int on) +ice_ptp_gpio_enable_generic(struct ptp_clock_info *info, + struct ptp_clock_request *rq, int on) { struct ice_pf *pf = ptp_info_to_pf(info); struct ice_perout_channel clk_cfg = {0}; @@ -2667,13 +3135,19 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, switch (rq->type) { case PTP_CLK_REQ_PEROUT: chan = rq->perout.index; - if (ice_is_e810t(&pf->hw)) { - if (chan == ice_e810t_pin_desc[SMA1].chan) + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { + if (chan == ice_pin_desc_e810t[SMA1].chan) clk_cfg.gpio_pin = GPIO_20; - else if (chan == ice_e810t_pin_desc[SMA2].chan) + else if (chan == ice_pin_desc_e810t[SMA2].chan) clk_cfg.gpio_pin = GPIO_22; else return -1; + } else if (ice_is_feature_supported(pf, + ICE_F_FIXED_TIMING_PINS)) { + if (chan == 0) + clk_cfg.gpio_pin = GPIO_20; + else + clk_cfg.gpio_pin = GPIO_22; } else if (chan == PPS_CLK_GEN_CHAN) { clk_cfg.gpio_pin = PPS_PIN_INDEX; } else { @@ -2690,11 +3164,18 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, break; case PTP_CLK_REQ_EXTTS: chan = rq->extts.index; - if (ice_is_e810t(&pf->hw)) { + + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { if (chan < 2) gpio_pin = GPIO_21; else gpio_pin = GPIO_23; + } else if (ice_is_feature_supported(pf, + ICE_F_FIXED_TIMING_PINS)) { + if (chan == 0) + gpio_pin = GPIO_21; + else + gpio_pin = GPIO_23; } else { gpio_pin = chan; } @@ -2709,6 +3190,38 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, return err; } +/** + * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC + * @info: the driver's PTP info structure + * @rq: The requested feature to change + * @on: Enable/disable flag + */ +static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info, + struct ptp_clock_request *rq, int on) +{ + struct ice_pf *pf = ptp_info_to_pf(info); + struct ice_perout_channel clk_cfg = {0}; + int err; + + switch (rq->type) { + case PTP_CLK_REQ_PPS: + clk_cfg.gpio_pin = PPS_PIN_INDEX; + clk_cfg.period = NSEC_PER_SEC; + clk_cfg.ena = !!on; + + err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true); + break; + case PTP_CLK_REQ_EXTTS: + err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index, + TIME_SYNC_PIN_INDEX, 
rq->extts.flags); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + #ifdef HAVE_PTP_CROSSTIMESTAMP /** * ice_ptp_get_syncdevicetime - Get the cross time stamp info @@ -2729,11 +3242,21 @@ ice_ptp_get_syncdevicetime(ktime_t *device, u32 hh_lock, hh_art_ctl; int i; - /* Get the HW lock */ - hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); +#define MAX_HH_HW_LOCK_TRIES 5 +#define MAX_HH_CTL_LOCK_TRIES 100 + + for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) { + /* Get the HW lock */ + hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); + if (hh_lock & PFHH_SEM_BUSY_M) { + usleep_range(10000, 15000); + continue; + } + break; + } if (hh_lock & PFHH_SEM_BUSY_M) { dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); - return -EFAULT; + return -EBUSY; } /* Start the ART and device clock sync sequence */ @@ -2741,9 +3264,7 @@ ice_ptp_get_syncdevicetime(ktime_t *device, hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; wr32(hw, GLHH_ART_CTL, hh_art_ctl); -#define MAX_HH_LOCK_TRIES 100 - - for (i = 0; i < MAX_HH_LOCK_TRIES; i++) { + for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) { /* Wait for sync to complete */ hh_art_ctl = rd32(hw, GLHH_ART_CTL); if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { @@ -2772,14 +3293,14 @@ ice_ptp_get_syncdevicetime(ktime_t *device, hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); - if (i == MAX_HH_LOCK_TRIES) + if (i == MAX_HH_CTL_LOCK_TRIES) return -ETIMEDOUT; return 0; } /** - * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp + * ice_ptp_getcrosststamp_generic - Capture a device cross timestamp * @info: the driver's PTP info structure * @cts: The memory to fill the cross timestamp info * @@ -2794,10 +3315,11 @@ ice_ptp_get_syncdevicetime(ktime_t *device, * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. */ static int -ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info, - struct system_device_crosststamp *cts) +ice_ptp_getcrosststamp_generic(struct ptp_clock_info *info, + struct system_device_crosststamp *cts) { struct ice_pf *pf = ptp_info_to_pf(info); + return get_device_system_crosststamp(ice_ptp_get_syncdevicetime, pf, NULL, cts); } @@ -2897,257 +3419,13 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) if (err) return err; - /* Save these settings for future reference */ - pf->ptp.tstamp_config = config; + /* Return the actual configuration set */ + config = pf->ptp.tstamp_config; return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } -/** - * ice_ptp_get_tx_hwtstamp_ver - Returns the Tx timestamp and valid bits - * @pf: Board specific private structure - * @tx_idx_req: Bitmap of timestamp indices to read - * @quad: Quad to read - * @ts: Timestamps read from PHY - * @ts_read: On return, if non-NULL: bitmap of read timestamp indices - * - * Read the value of the Tx timestamp from the registers and build a - * bitmap of successfully read indices and count of the number successfully - * read. - * - * There are 3 possible return values, - * 0 = success - * - * -EIO = unable to read a register, this could be to a variety of issues but - * should be very rare. Up to caller how to respond to this (retry, abandon, - * etc). But once this situation occurs, stop reading as we cannot - * guarantee what state the PHY or Timestamp Unit is in. - * - * -EINVAL = (at least) one of the timestamps that was read did not have the - * TS_VALID bit set, and is probably zero. 
Be aware that not all of the - * timestamps that were read (so the TS_READY bit for this timestamp was - * cleared but no valid TS was retrieved) are present. Expect at least one - * ts_read index that should be 1 is zero. - */ -static int ice_ptp_get_tx_hwtstamp_ver(struct ice_pf *pf, u64 tx_idx_req, - u8 quad, u64 *ts, u64 *ts_read) -{ - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; - enum ice_status status; - unsigned long i; - u64 ts_ns; - - - for_each_set_bit(i, (unsigned long *)&tx_idx_req, INDEX_PER_QUAD) { - ts[i] = 0x0; - - status = ice_read_phy_tstamp(hw, quad, i, &ts_ns); - if (status) { - dev_dbg(dev, "PTP Tx read failed, status %s\n", - ice_stat_str(status)); - return ice_status_to_errno(status); - } - - if (ts_read) - *ts_read |= BIT(i); - - if (!(ts_ns & ICE_PTP_TS_VALID)) { - dev_dbg(dev, "PTP tx invalid\n"); - continue; - } - - ts_ns = ice_ptp_extend_40b_ts(pf, ts_ns); - /* Each timestamp will be offset in the array of - * timestamps by the index's value. So the timestamp - * from index n will be in ts[n] position. - */ - ts[i] = ts_ns; - } - - return 0; -} - - -/** - * ice_ptp_get_tx_hwtstamp_ready - Get the Tx timestamp ready bitmap - * @pf: The PF private data structure - * @quad: Quad to read (0-4) - * @ts_ready: Bitmap where each bit set indicates that the corresponding - * timestamp register is ready to read - * - * Read the PHY timestamp ready registers for a particular bank. - */ -static void -ice_ptp_get_tx_hwtstamp_ready(struct ice_pf *pf, u8 quad, u64 *ts_ready) -{ - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; - enum ice_status status; - u64 bitmap; - u32 val; - - status = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, - &val); - if (status) { - dev_dbg(dev, "TX_MEMORY_STATUS_U read failed for quad %u\n", - quad); - return; - } - - bitmap = val; - - status = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, - &val); - if (status) { - dev_dbg(dev, "TX_MEMORY_STATUS_L read failed for quad %u\n", - quad); - return; - } - - bitmap = (bitmap << 32) | val; - - *ts_ready = bitmap; - -} - -/** - * ice_ptp_tx_hwtstamp_vsi - Return the Tx timestamp for a specified VSI - * @vsi: lport corresponding VSI - * @idx: Index of timestamp read from QUAD memory - * @hwtstamp: Timestamps read from PHY - * - * Helper function for ice_ptp_tx_hwtstamp. - */ -static void -ice_ptp_tx_hwtstamp_vsi(struct ice_vsi *vsi, int idx, u64 hwtstamp) -{ - struct skb_shared_hwtstamps shhwtstamps = {}; - struct sk_buff *skb; - - skb = vsi->ptp_tx_skb[idx]; - if (!skb) - return; - - shhwtstamps.hwtstamp = ns_to_ktime(hwtstamp); - - vsi->ptp_tx_skb[idx] = NULL; - - /* Notify the stack and free the skb after we've unlocked */ - skb_tstamp_tx(skb, &shhwtstamps); - dev_kfree_skb_any(skb); - clear_bit(idx, vsi->ptp_tx_idx); -} - -/** - * ice_ptp_tx_hwtstamp - Return the Tx timestamps - * @pf: Board private structure - * - * Read the tx_memory_status registers for the PHY timestamp block. Determine - * which entries contain a valid ready timestamp. Read out the timestamp from - * the table. Convert the 40b timestamp value into the 64b nanosecond value - * consumed by the stack, and then report it as part of the related skb's - * shhwtstamps structure. - * - * Note that new timestamps might come in while we're reading the timestamp - * block. However, no interrupts will be triggered until the intr_threshold is - * crossed again. Thus we read the status registers in a loop until no more - * timestamps are ready. 
- */ -static void ice_ptp_tx_hwtstamp(struct ice_pf *pf) -{ - u8 quad, lport, qport; - struct ice_vsi *vsi; - int msk_shft; - u64 rdy_msk; - - vsi = ice_get_main_vsi(pf); - if (!vsi) - return; - - lport = vsi->port_info->lport; - qport = lport % ICE_PORTS_PER_QUAD; - quad = lport / ICE_PORTS_PER_QUAD; - msk_shft = qport * INDEX_PER_PORT; - rdy_msk = GENMASK_ULL(msk_shft + INDEX_PER_PORT - 1, msk_shft); - - while (true) { - u64 ready_map = 0, valid_map = 0; - u64 hwtstamps[INDEX_PER_QUAD]; - int i, ret; - - ice_ptp_get_tx_hwtstamp_ready(pf, quad, &ready_map); - ready_map &= rdy_msk; - if (!ready_map) - break; - - ret = ice_ptp_get_tx_hwtstamp_ver(pf, ready_map, quad, - hwtstamps, &valid_map); - if (ret == -EIO) - break; - - for_each_set_bit(i, (unsigned long *)&valid_map, INDEX_PER_QUAD) - if (test_bit(i, vsi->ptp_tx_idx)) - ice_ptp_tx_hwtstamp_vsi(vsi, i, hwtstamps[i]); - } -} - -/** - * ice_ptp_tx_hwtstamp_ext - Return the Tx timestamp - * @pf: Board private structure - * - * Read the value of the Tx timestamp from the registers, convert it into - * a value consumable by the stack, and store that result into the shhwtstamps - * struct before returning it up the stack. - */ -static void ice_ptp_tx_hwtstamp_ext(struct ice_pf *pf) -{ - struct ice_hw *hw = &pf->hw; - struct ice_vsi *vsi; - u8 lport; - int idx; - - vsi = ice_get_main_vsi(pf); - if (!vsi || !vsi->ptp_tx) - return; - lport = hw->port_info->lport; - - /* Don't attempt to timestamp if we don't have an skb */ - for (idx = 0; idx < INDEX_PER_QUAD; idx++) { - struct skb_shared_hwtstamps shhwtstamps = {}; - enum ice_status status; - struct sk_buff *skb; - u64 ts_ns; - - skb = vsi->ptp_tx_skb[idx]; - if (!skb) - continue; - - status = ice_read_phy_tstamp(hw, lport, idx, &ts_ns); - if (status) { - dev_err(ice_pf_to_dev(pf), "PTP tx rd failed, status %s\n", - ice_stat_str(status)); - vsi->ptp_tx_skb[idx] = NULL; - dev_kfree_skb_any(skb); - clear_bit(idx, vsi->ptp_tx_idx); - } - - ts_ns = ice_ptp_extend_40b_ts(pf, ts_ns); - - shhwtstamps.hwtstamp = ns_to_ktime(ts_ns); - - vsi->ptp_tx_skb[idx] = NULL; - - /* Notify the stack and free the skb after - * we've unlocked - */ - skb_tstamp_tx(skb, &shhwtstamps); - dev_kfree_skb_any(skb); - clear_bit(idx, vsi->ptp_tx_idx); - } -} - /** * ice_ptp_rx_hwtstamp - Check for an Rx timestamp * @rx_ring: Ring to get the VSI info @@ -3157,48 +3435,90 @@ static void ice_ptp_tx_hwtstamp_ext(struct ice_pf *pf) * The driver receives a notification in the receive descriptor with timestamp. * The timestamp is in ns, so we must convert the result first. */ -void ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb) +void +ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { + struct skb_shared_hwtstamps *hwtstamps; + u64 ts_ns, cached_time; u32 ts_high; - u64 ts_ns; - /* Populate timesync data into skb */ - if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) { - struct skb_shared_hwtstamps *hwtstamps; + if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID)) + return; - /* Use ice_ptp_extend_32b_ts directly, using the ring-specific - * cached PHC value, rather than accessing the PF. This also - * allows us to simply pass the upper 32bits of nanoseconds - * directly. Calling ice_ptp_extend_40b_ts is unnecessary as - * it would just discard these bits itself. 
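The extension step that both the Tx and Rx paths rely on can be summarized as follows. This is a sketch of the technique behind ice_ptp_extend_32b_ts, choosing the 64-bit time nearest to a recently cached PHC value, not a copy of the driver's implementation; the 40-bit PHY timestamps go through the same step once the low byte (valid flag plus sub-nanosecond bits, per the ICE_PTP_TS_VALID check earlier in this patch) has been shifted out.

#include <linux/kernel.h>
#include <linux/limits.h>

/* Sketch: extend a 32 bit nanosecond timestamp to 64 bits by choosing the
 * value closest to a cached 64 bit PHC time. Only valid while the cache is
 * fresher than half the 32 bit wrap period (roughly 2.1 s).
 */
static u64 extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 phc_low = lower_32_bits(cached_phc_time);
	u32 delta = in_tstamp - phc_low;

	/* A forward delta larger than half the 32 bit range means the
	 * timestamp is actually slightly behind the cached time.
	 */
	if (delta > (U32_MAX / 2))
		return cached_phc_time - (phc_low - in_tstamp);

	return cached_phc_time + delta;
}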
- */ - ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); - ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_systime, ts_high); + cached_time = READ_ONCE(rx_ring->cached_phctime); - hwtstamps = skb_hwtstamps(skb); - memset(hwtstamps, 0, sizeof(*hwtstamps)); - hwtstamps->hwtstamp = ns_to_ktime(ts_ns); - } + /* Do not report a timestamp if we don't have a cached PHC time */ + if (!cached_time) + return; + + /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached + * PHC value, rather than accessing the PF. This also allows us to + * simply pass the upper 32bits of nanoseconds directly. Calling + * ice_ptp_extend_40b_ts is unnecessary as it would just discard these + * bits itself. + */ + ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); + ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high); + + hwtstamps = skb_hwtstamps(skb); + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ts_ns); } /** - * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs - * @pf: pointer to the PF instance - * @info: PTP clock capabilities + * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins + * @pf: pointer to the PF structure + * @info: PTP clock info structure + * + * Disable the OS access to the SMA pins. Called to clear out the OS + * indications of pin support when we fail to setup the E810-T SMA control + * register. */ static void -ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) { - info->n_per_out = E810T_N_PER_OUT; + struct device *dev = ice_pf_to_dev(pf); - if (!ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) + dev_warn(dev, "Failed to configure E810-T SMA pin control\n"); + + info->enable = NULL; + info->verify = NULL; + info->n_pins = 0; + info->n_ext_ts = 0; + info->n_per_out = 0; +} + +/** + * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins + * @pf: pointer to the PF structure + * @info: PTP clock info structure + * + * Finish setting up the SMA pins by allocating pin_config, and setting it up + * according to the current status of the SMA. On failure, disable all of the + * extended SMA pin support. 
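+ *
+ * Both failure paths below (the pin_config allocation and the SMA status
+ * read) fall back to ice_ptp_disable_sma_pins_e810t(), which clears the
+ * pin, external timestamp and periodic output counts so that the kernel
+ * does not advertise pins the driver cannot actually control.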
+ */ +static void +ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +{ + struct device *dev = ice_pf_to_dev(pf); + int err; + + /* Allocate memory for kernel pins interface */ + info->pin_config = devm_kcalloc(dev, info->n_pins, + sizeof(*info->pin_config), GFP_KERNEL); + if (!info->pin_config) { + dev_err(dev, "Failed to allocate pin_config for E810-T SMA pins\n"); + ice_ptp_disable_sma_pins_e810t(pf, info); return; + } - info->n_ext_ts = E810_N_EXT_TS; - info->n_pins = NUM_E810T_PTP_PINS; - info->verify = ice_e810t_verify_pin; + /* Read current SMA status */ + err = ice_get_sma_config_e810t(&pf->hw, info->pin_config); + if (err) { + ice_ptp_disable_sma_pins_e810t(pf, info); + return; + } } /** @@ -3209,21 +3529,48 @@ ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) static void ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info) { - info->n_per_out = E810_N_PER_OUT; + info->n_per_out = N_PER_OUT_E810; - if (!ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) + if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) + info->n_ext_ts = N_EXT_TS_E810; + + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { + info->n_ext_ts = N_EXT_TS_E810; + info->n_pins = NUM_E810T_PTP_PINS; + info->verify = ice_verify_pin_e810t; + + /* Complete setup of the SMA pins */ + ice_ptp_setup_sma_pins_e810t(pf, info); return; + } - info->n_ext_ts = E810_N_EXT_TS; + if (ice_is_feature_supported(pf, ICE_F_FIXED_TIMING_PINS)) { + info->n_ext_ts = N_EXT_TS_NO_SMA_E810T; + info->n_per_out = N_PER_OUT_NO_SMA_E810T; + return; + } } /** - * ice_ptp_setup_pins_e822 - Setup PTP pins in sysfs + * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs * @pf: pointer to the PF instance * @info: PTP clock capabilities */ static void -ice_ptp_setup_pins_e822(struct ice_pf *pf, struct ptp_clock_info *info) +ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info) +{ + info->pps = 1; + info->n_per_out = 0; + info->n_ext_ts = 1; +} + +/** + * ice_ptp_setup_pins_generic - Setup PTP pins in sysfs + * @pf: pointer to the PF instance + * @info: PTP clock capabilities + */ +static void +ice_ptp_setup_pins_generic(struct ice_pf *pf, struct ptp_clock_info *info) { info->pps = 1; info->n_per_out = 1; @@ -3233,7 +3580,7 @@ ice_ptp_setup_pins_e822(struct ice_pf *pf, struct ptp_clock_info *info) } /** - * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support + * ice_ptp_set_funcs_generic - Set specialized functions for E822 support * @pf: Board private structure * @info: PTP info to fill * @@ -3243,16 +3590,16 @@ ice_ptp_setup_pins_e822(struct ice_pf *pf, struct ptp_clock_info *info) * devices. 
*/ static void -ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info) +ice_ptp_set_funcs_generic(struct ice_pf *pf, struct ptp_clock_info *info) { #ifdef HAVE_PTP_CROSSTIMESTAMP if (boot_cpu_has(X86_FEATURE_ART) && boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) - info->getcrosststamp = ice_ptp_getcrosststamp_e822; + info->getcrosststamp = ice_ptp_getcrosststamp_generic; #endif /* HAVE_PTP_CROSSTIMESTAMP */ - info->enable = ice_ptp_gpio_enable_e822; + info->enable = ice_ptp_gpio_enable_generic; - ice_ptp_setup_pins_e822(pf, info); + ice_ptp_setup_pins_generic(pf, info); } /** @@ -3269,11 +3616,24 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info) { info->enable = ice_ptp_gpio_enable_e810; + ice_ptp_setup_pins_e810(pf, info); +} - if (ice_is_e810t(&pf->hw)) - ice_ptp_setup_pins_e810t(pf, info); - else - ice_ptp_setup_pins_e810(pf, info); +/** + * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support + * @pf: Board private structure + * @info: PTP info to fill + * + * Assign functions to the PTP capabiltiies structure for E823 devices. + * Functions which operate across all device families should be set directly + * in ice_ptp_set_caps. Only add functions here which are distinct for e823 + * devices. + */ +static void +ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info) +{ + info->enable = ice_ptp_gpio_enable_e823; + ice_ptp_setup_pins_e823(pf, info); } /** @@ -3288,7 +3648,8 @@ static void ice_ptp_set_caps(struct ice_pf *pf) snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk", dev_driver_string(dev), dev_name(dev)); info->owner = THIS_MODULE; - info->max_adj = 999999999; + + info->max_adj = 100000000; info->adjtime = ice_ptp_adjtime; #ifdef HAVE_PTP_CLOCK_INFO_ADJFINE info->adjfine = ice_ptp_adjfine; @@ -3310,8 +3671,10 @@ static void ice_ptp_set_caps(struct ice_pf *pf) if (ice_is_e810(&pf->hw)) ice_ptp_set_funcs_e810(pf, info); + else if (ice_is_e823(&pf->hw)) + ice_ptp_set_funcs_e823(pf, info); else - ice_ptp_set_funcs_e822(pf, info); + ice_ptp_set_funcs_generic(pf, info); } /** @@ -3338,40 +3701,324 @@ static long ice_ptp_create_clock(struct ice_pf *pf) info = &pf->ptp.info; dev = ice_pf_to_dev(pf); - /* Allocate memory for kernel pins interface */ - if (info->n_pins) { - info->pin_config = devm_kcalloc(dev, info->n_pins, - sizeof(*info->pin_config), - GFP_KERNEL); - if (!info->pin_config) { - info->n_pins = 0; - return ICE_ERR_NO_MEMORY; - } - } - - if (ice_is_e810t(&pf->hw)) { - /* Enable SMA controller */ - int err = ice_enable_e810t_sma_ctrl(&pf->hw, true); - - if (err) - return err; - - /* Read current SMA status */ - err = ice_get_e810t_sma_config(&pf->hw, info->pin_config); - if (err) - return err; - } - /* Attempt to register the clock before enabling the hardware. 
*/ clock = ptp_clock_register(info, dev); - if (IS_ERR(clock)) + if (IS_ERR(clock)) { + ice_dev_err_errno(dev, PTR_ERR(clock), + "Failed to register PTP clock device"); return PTR_ERR(clock); + } pf->ptp.clock = clock; return 0; } +/** + * ice_ptp_request_ts - Request an available Tx timestamp index + * @tx: the PTP Tx timestamp tracker to request from + * @skb: the SKB to associate with this timestamp request + */ +s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) +{ + u8 idx; + + /* Check if this tracker is initialized */ + if (!tx->init || tx->calibrating) + return -1; + + spin_lock(&tx->lock); + /* Find and set the first available index */ + idx = find_first_zero_bit(tx->in_use, tx->len); + if (idx < tx->len) { + /* We got a valid index that no other thread could have set. + * Store a reference to the skb and the start time to allow + * discarding old requests. + */ + set_bit(idx, tx->in_use); + set_bit(idx, tx->unread); + tx->tstamps[idx].start = jiffies; + tx->tstamps[idx].skb = skb_get(skb); + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + ice_trace(tx_tstamp_request, skb, idx); + } + + spin_unlock(&tx->lock); + + /* return the appropriate PHY timestamp register index, -1 if no + * indexes were available. + */ + if (idx >= tx->len) + return -1; + else + return idx + tx->offset; +} + +/** + * ice_ptp_process_ts - Process TX timestamps + * @pf: Board private structure + * + * Returns true if timestamp processing is complete. + */ +bool ice_ptp_process_ts(struct ice_pf *pf) +{ + if (pf->ptp.port.tx.init) + return ice_ptp_tx_tstamp(&pf->ptp.port.tx); + else + return true; +} + +/** + * ice_dpll_pin_idx_to_name - Return pin name for a corresponding pin + * + * @pf: pointer to the PF instance + * @pin: pin number to get name for + * @pin_name: pointer to pin name buffer + * + * A wrapper for device-specific pin index to name converters that take care + * of mapping pin indices returned by a netlist to real pin names + */ +void ice_dpll_pin_idx_to_name(struct ice_pf *pf, u8 pin, char *pin_name) +{ + /* if we are on a custom board, print generic descriptions */ + if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { + snprintf(pin_name, MAX_PIN_NAME, "Pin %i", pin); + return; + } + switch (pf->hw.device_id) { + case ICE_DEV_ID_E810C_SFP: + /* Skip second PHY recovered clocks as they are not represented + * in the netlist + */ + if (pin >= ZL_REF2P) + pin += 2; + fallthrough; + case ICE_DEV_ID_E810C_QSFP: + snprintf(pin_name, MAX_PIN_NAME, "%s", + ice_zl_pin_idx_to_name_e810t(pin)); + return; + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_SGMII: + snprintf(pin_name, MAX_PIN_NAME, "%s", + ice_pin_idx_to_name_e823(&pf->hw, pin)); + return; + default: + snprintf(pin_name, MAX_PIN_NAME, "Pin %i", pin); + } +} + +static void ice_handle_cgu_state(struct ice_pf *pf) +{ + enum ice_cgu_state cgu_state; + char pin_name[MAX_PIN_NAME]; + + cgu_state = ice_get_cgu_state(&pf->hw, ICE_CGU_DPLL_SYNCE, + &pf->synce_ref_pin, NULL, + pf->synce_dpll_state); + ice_dpll_pin_idx_to_name(pf, pf->synce_ref_pin, pin_name); + if (pf->synce_dpll_state != cgu_state) { + pf->synce_dpll_state = cgu_state; + dev_warn(ice_pf_to_dev(pf), + "DPLL%i state changed to: %s, pin %s", + ICE_CGU_DPLL_SYNCE, + ice_cgu_state_to_name(pf->synce_dpll_state), 
pin_name); + } + + cgu_state = ice_get_cgu_state(&pf->hw, ICE_CGU_DPLL_PTP, + &pf->ptp_ref_pin, + &pf->ptp_dpll_phase_offset, + pf->ptp_dpll_state); + ice_dpll_pin_idx_to_name(pf, pf->ptp_ref_pin, pin_name); + if (pf->ptp_dpll_state != cgu_state) { + pf->ptp_dpll_state = cgu_state; + dev_warn(ice_pf_to_dev(pf), + "DPLL%i state changed to: %s, pin %s", + ICE_CGU_DPLL_PTP, + ice_cgu_state_to_name(pf->ptp_dpll_state), pin_name); + } +} + +static void ice_ptp_periodic_work(struct kthread_work *work) +{ + struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work); + struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); + struct ice_hw *hw = &pf->hw; + u32 phc_recalc; + int err; + + if (ice_is_feature_supported(pf, ICE_F_CGU)) { + if (test_bit(ICE_FLAG_DPLL_MONITOR, pf->flags) && + hw->func_caps.ts_func_info.src_tmr_owned) { + ice_handle_cgu_state(pf); + } + } + + if (!test_bit(ICE_FLAG_PTP, pf->flags)) + return; + + /* Recalibrate PTP ports after setting time */ + err = ice_aq_get_driver_param(hw, ICE_AQC_DRIVER_PARAM_PHC_RECALC, + &phc_recalc, NULL); + if (err) { + dev_err(ice_pf_to_dev(pf), + "Failed to read PTP PHC recalc ID, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + } else if (pf->phc_recalc != phc_recalc) { + if (pf->ptp.port.link_up) + ice_ptp_port_phy_restart(&pf->ptp.port); + pf->phc_recalc = phc_recalc; + } + + err = ice_ptp_update_cached_phctime(pf); + + /* Run twice a second or reschedule if PHC update failed */ + kthread_queue_delayed_work(ptp->kworker, &ptp->work, + msecs_to_jiffies(err ? 10 : 500)); +} + +/** + * ice_ptp_reset - Initialize PTP hardware clock support after reset + * @pf: Board private structure + */ +void ice_ptp_reset(struct ice_pf *pf) +{ + struct ice_ptp *ptp = &pf->ptp; + struct ice_hw *hw = &pf->hw; + struct timespec64 ts; + int err, itr = 1; + u64 time_diff; + + if (test_bit(ICE_PFR_REQ, pf->state)) + goto pfr; + + if (!hw->func_caps.ts_func_info.src_tmr_owned) + goto reset_ts; + + err = ice_ptp_init_phc(hw); + if (err) { + dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, status %d\n", + err); + goto err; + } + + /* Acquire the global hardware lock */ + if (!ice_ptp_lock(hw)) { + err = -EBUSY; + dev_err(ice_pf_to_dev(pf), "Failed to acquire PTP hardware semaphore\n"); + goto err; + } + + /* Write the increment time value to PHY and LAN */ + err = ice_ptp_write_incval(hw, ice_base_incval(pf)); + if (err) { + dev_err(ice_pf_to_dev(pf), "Failed to write PHC increment value, status %d\n", + err); + ice_ptp_unlock(hw); + goto err; + } + + /* Write the initial Time value to PHY and LAN using the cached PHC + * time before the reset and time difference between stopping and + * starting the clock. 
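+	 * In other words the clock is restarted at roughly
+	 *   cached_phc_time + (ktime_get_real_ns() - reset_time)
+	 * which approximates where the PHC would be had it kept running; only
+	 * when no cached value is available do we fall back to the current
+	 * wall clock time.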
+ */ + if (ptp->cached_phc_time) { + time_diff = ktime_get_real_ns() - ptp->reset_time; + ts = ns_to_timespec64(ptp->cached_phc_time + time_diff); + } else { + ts = ktime_to_timespec64(ktime_get_real()); + } + err = ice_ptp_write_init(pf, &ts); + if (err) { + ice_dev_err_errno(ice_pf_to_dev(pf), err, + "Failed to write PHC initial time"); + ice_ptp_unlock(hw); + goto err; + } + + /* Release the global hardware lock */ + ice_ptp_unlock(hw); + + if (!ice_is_e810(hw)) { + /* Enable quad interrupts */ + err = ice_ptp_tx_ena_intr(pf, true, itr); + if (err) { + ice_dev_err_errno(ice_pf_to_dev(pf), err, + "Failed to enable Tx interrupt"); + goto err; + } + } + +reset_ts: + /* Restart the PHY timestamping block */ + ice_ptp_reset_phy_timestamping(pf); + +pfr: + /* Init Tx structures */ + if (ice_is_e810(&pf->hw)) { + err = ice_ptp_init_tx_e810(pf, &ptp->port.tx); + } else { + kthread_init_delayed_work(&ptp->port.ov_work, + ice_ptp_wait_for_offset_valid); + err = ice_ptp_init_tx_e822(pf, &ptp->port.tx, + ptp->port.port_num); + } + if (err) + goto err; + + set_bit(ICE_FLAG_PTP, pf->flags); + + /* Start periodic work going */ + kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); + + dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); + return; + +err: + ice_dev_err_errno(ice_pf_to_dev(pf), err, "PTP reset failed"); +} + +/** + * ice_ptp_prepare_for_reset - Prepare PTP for reset + * @pf: Board private structure + */ +void ice_ptp_prepare_for_reset(struct ice_pf *pf) +{ + struct ice_ptp *ptp = &pf->ptp; + u8 src_tmr; + + clear_bit(ICE_FLAG_PTP, pf->flags); + + /* Disable timestamping for both Tx and Rx */ + ice_ptp_cfg_timestamp(pf, false); + + kthread_cancel_delayed_work_sync(&ptp->work); + kthread_cancel_work_sync(&ptp->extts_work); + + if (test_bit(ICE_PFR_REQ, pf->state)) + return; + + ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); + + /* Disable periodic outputs */ + ice_ptp_disable_all_clkout(pf); + + src_tmr = ice_get_ptp_src_clock_index(&pf->hw); + + /* Disable source clock */ + wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M); + + /* Acquire PHC and system timer to restore after reset */ + ptp->reset_time = ktime_get_real_ns(); +} + /** * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device * @pf: Board private structure @@ -3384,50 +4031,39 @@ static int ice_ptp_init_owner(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; struct timespec64 ts; int err, itr = 1; - u8 src_idx; - u32 regval; - if (ice_is_e810(hw)) - wr32(hw, GLTSYN_SYNC_DLAY, 0); - - /* Clear some HW residue and enable source clock */ - src_idx = hw->func_caps.ts_func_info.tmr_index_owned; - - /* Enable source clocks */ - wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M); - - if (ice_is_e810(hw)) { - /* Enable PHY time sync */ - status = ice_ptp_init_phy_e810(hw); - if (status) { - err = ice_status_to_errno(status); - goto err_exit; - } + /* Start recalculations after setting time */ + pf->phc_recalc = INITIAL_PHC_RECALC_ID; + err = ice_aq_set_driver_param(hw, ICE_AQC_DRIVER_PARAM_PHC_RECALC, + INITIAL_PHC_RECALC_ID, NULL); + if (err) { + dev_dbg(ice_pf_to_dev(pf), + "Failed to set initial PTP PHC recalc ID, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + return err; + } + err = ice_ptp_init_phc(hw); + if (err) { + dev_err(dev, "Failed to initialize PHC, status %d\n", err); + return err; } - /* Clear event status indications for auxiliary pins */ - (void)rd32(hw, GLTSYN_STAT(src_idx)); - -#define 
PF_SB_REM_DEV_CTL_PHY0 BIT(2) - if (!ice_is_e810(hw)) { - regval = rd32(hw, PF_SB_REM_DEV_CTL); - regval |= PF_SB_REM_DEV_CTL_PHY0; - wr32(hw, PF_SB_REM_DEV_CTL, regval); - } + pf->ptp.src_tmr_mode = ICE_SRC_TMR_MODE_NANOSECONDS; /* Acquire the global hardware lock */ if (!ice_ptp_lock(hw)) { err = -EBUSY; + dev_err(dev, "Failed to acquire PTP hardware semaphore\n"); goto err_exit; } /* Write the increment time value to PHY and LAN */ - status = ice_ptp_write_incval(hw, ice_base_incval(pf)); - if (status) { - err = ice_status_to_errno(status); + err = ice_ptp_write_incval(hw, ice_base_incval(pf)); + if (err) { + dev_err(dev, "Failed to write PHC increment value, status %d\n", + err); ice_ptp_unlock(hw); goto err_exit; } @@ -3436,6 +4072,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf) /* Write the initial Time value to PHY and LAN */ err = ice_ptp_write_init(pf, &ts); if (err) { + ice_dev_err_errno(dev, err, "Failed to write PHC initial time"); ice_ptp_unlock(hw); goto err_exit; } @@ -3444,37 +4081,86 @@ static int ice_ptp_init_owner(struct ice_pf *pf) ice_ptp_unlock(hw); if (!ice_is_e810(hw)) { - /* Set window length for all the ports */ - status = ice_ptp_set_vernier_wl(hw); - if (status) { - err = ice_status_to_errno(status); - goto err_exit; - } - /* Enable quad interrupts */ err = ice_ptp_tx_ena_intr(pf, true, itr); - if (err) + if (err) { + ice_dev_err_errno(dev, err, + "Failed to enable Tx interrupt"); goto err_exit; - - /* Reset timestamping memory in QUADs */ - ice_ptp_reset_ts_memory(pf); + } } /* Ensure we have a clock device */ err = ice_ptp_create_clock(pf); - if (err) + if (err) { + ice_dev_err_errno(dev, err, + "Failed to register PTP clock device"); goto err_clk; + } /* Store the PTP clock index for other PFs */ ice_set_ptp_clock_index(pf); + if (ice_is_feature_supported(pf, ICE_F_CGU)) { + set_bit(ICE_FLAG_DPLL_MONITOR, pf->flags); + pf->synce_dpll_state = ICE_CGU_STATE_UNKNOWN; + pf->ptp_dpll_state = ICE_CGU_STATE_UNKNOWN; + } + return 0; err_clk: pf->ptp.clock = NULL; err_exit: - dev_err(dev, "PTP failed to register clock, err %d\n", err); + return err; +} +/** + * ice_ptp_init_work - Initialize PTP work threads + * @pf: Board private structure + * @ptp: PF PTP structure + */ +static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) +{ + struct kthread_worker *kworker; + + /* Initialize work functions */ + kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work); + kthread_init_work(&ptp->extts_work, ice_ptp_extts_work); + + /* Allocate a kworker for handling work required for the ports + * connected to the PTP hardware clock. 
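+	 * The periodic work item is queued on this worker right after it is
+	 * created, so ice_ptp_periodic_work() starts running (and re-arming
+	 * itself) as soon as initialization succeeds.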
+ */ + kworker = kthread_create_worker(0, "ice-ptp-%s", + dev_name(ice_pf_to_dev(pf))); + if (IS_ERR(kworker)) + return PTR_ERR(kworker); + + ptp->kworker = kworker; + + /* Start periodic work going */ + kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); + + return 0; +} + +/** + * ice_ptp_init_port - Initialize PTP port structure + * @pf: Board private structure + * @ptp_port: PTP port structure + */ +static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) +{ + int err; + + mutex_init(&ptp_port->ps_lock); + + if (ice_is_e810(&pf->hw)) + return ice_ptp_init_tx_e810(pf, &ptp_port->tx); + + kthread_init_delayed_work(&ptp_port->ov_work, + ice_ptp_wait_for_offset_valid); + err = ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num); return err; } @@ -3482,7 +4168,7 @@ err_exit: * ice_ptp_init - Initialize PTP hardware clock support * @pf: Board private structure * - * Setup the device for interacting with the PTP hardware clock for all + * Set up the device for interacting with the PTP hardware clock for all * functions, both the function that owns the clock hardware, and the * functions connected to the clock hardware. * @@ -3492,10 +4178,11 @@ err_exit: */ void ice_ptp_init(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); + struct ice_ptp *ptp = &pf->ptp; struct ice_hw *hw = &pf->hw; int err; + ice_ptp_init_phy_cfg(hw); /* If this function owns the clock hardware, it must allocate and * configure the PTP clock device to represent it. @@ -3503,40 +4190,35 @@ void ice_ptp_init(struct ice_pf *pf) if (hw->func_caps.ts_func_info.src_tmr_owned) { err = ice_ptp_init_owner(pf); if (err) - return; + goto err; } - /* Disable timestamping for both Tx and Rx */ - ice_ptp_cfg_timestamp(pf, false); + ptp->port.port_num = hw->pf_id; + err = ice_ptp_init_port(pf, &ptp->port); + if (err) + goto err; - /* Initialize work structures */ - mutex_init(&pf->ptp.port.ps_lock); - pf->ptp.port.link_up = false; - pf->ptp.port.port_num = pf->hw.pf_id; - INIT_WORK(&pf->ptp.port.ov_task, ice_ptp_wait_for_offset_valid); - - /* Allocate workqueue for 2nd part of Vernier calibration */ - pf->ptp.ov_wq = alloc_workqueue("%s_ov", WQ_MEM_RECLAIM, 0, - KBUILD_MODNAME); - if (!pf->ptp.ov_wq) { - err = -ENOMEM; - goto err_wq; - } + /* Start the PHY timestamping block */ + ice_ptp_reset_phy_timestamping(pf); set_bit(ICE_FLAG_PTP, pf->flags); - dev_info(dev, "PTP init successful\n"); + err = ice_ptp_init_work(pf, ptp); + if (err) + goto err; - if (hw->func_caps.ts_func_info.src_tmr_owned && !ice_is_e810(hw)) - ice_cgu_init_state(pf); + ice_ptp_sysfs_init(pf); + + dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); return; -err_wq: +err: /* If we registered a PTP clock, release it */ if (pf->ptp.clock) { - ptp_clock_unregister(pf->ptp.clock); + ptp_clock_unregister(ptp->clock); pf->ptp.clock = NULL; } - dev_err(dev, "PTP failed %d\n", err); + clear_bit(ICE_FLAG_PTP, pf->flags); + ice_dev_err_errno(ice_pf_to_dev(pf), err, "PTP init failed"); } /** @@ -3550,7 +4232,6 @@ void ice_ptp_release(struct ice_pf *pf) { struct ice_vsi *vsi; char *dev_name; - u8 quad, i; if (!pf) return; @@ -3563,36 +4244,28 @@ void ice_ptp_release(struct ice_pf *pf) /* Disable timestamping for both Tx and Rx */ ice_ptp_cfg_timestamp(pf, false); - /* Clear PHY bank residues if any */ - quad = vsi->port_info->lport / ICE_PORTS_PER_QUAD; - if (!ice_is_e810(&pf->hw) && !pf->hw.reset_ongoing) { - u64 tx_idx = ~((u64)0); - u64 ts[INDEX_PER_QUAD]; - - ice_ptp_get_tx_hwtstamp_ver(pf, tx_idx, quad, ts, NULL); - } else 
{ - ice_ptp_tx_hwtstamp_ext(pf); - } - - /* Release any pending skb */ - ice_ptp_rel_all_skb(pf); + ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); clear_bit(ICE_FLAG_PTP, pf->flags); - pf->ptp.port.link_up = false; - if (pf->ptp.ov_wq) { - destroy_workqueue(pf->ptp.ov_wq); - pf->ptp.ov_wq = NULL; + kthread_cancel_delayed_work_sync(&pf->ptp.work); + + ice_ptp_port_phy_stop(&pf->ptp.port); + mutex_destroy(&pf->ptp.port.ps_lock); + + if (pf->ptp.kworker) { + kthread_destroy_worker(pf->ptp.kworker); + pf->ptp.kworker = NULL; } + ice_ptp_sysfs_release(pf); + if (!pf->ptp.clock) return; /* Disable periodic outputs */ - for (i = 0; i < pf->ptp.info.n_per_out; i++) - if (pf->ptp.perout_channels[i].ena) - ice_ptp_cfg_clkout(pf, i, NULL, false); + ice_ptp_disable_all_clkout(pf); ice_clear_ptp_clock_index(pf); ptp_clock_unregister(pf->ptp.clock); @@ -3606,43 +4279,3 @@ void ice_ptp_release(struct ice_pf *pf) dev_info(ice_pf_to_dev(pf), "removed Clock from %s\n", dev_name); } - -/** - * ice_ptp_set_timestamp_offsets - Calculate timestamp offsets on each port - * @pf: Board private structure - * - * This function calculates timestamp Tx/Rx offset on each port after at least - * one packet was sent/received by the PHY. - */ -void ice_ptp_set_timestamp_offsets(struct ice_pf *pf) -{ - if (!test_bit(ICE_FLAG_PTP, pf->flags)) - return; - - if (atomic_read(&pf->ptp.phy_reset_lock)) - return; - - ice_ptp_check_offset_valid(&pf->ptp.port); -} - -/** - * ice_clean_ptp_subtask - Handle the service task events - * @pf: Board private structure - */ -void ice_clean_ptp_subtask(struct ice_pf *pf) -{ - if (!test_bit(ICE_FLAG_PTP, pf->flags)) - return; - - ice_ptp_update_cached_systime(pf); - if (test_and_clear_bit(ICE_PTP_EXT_TS_READY, pf->state)) - ice_ptp_extts_work(pf); - if (test_and_clear_bit(ICE_PTP_TX_TS_READY, pf->state)) { - struct ice_hw *hw = &pf->hw; - - if (ice_is_e810(hw)) - ice_ptp_tx_hwtstamp_ext(pf); - else - ice_ptp_tx_hwtstamp(pf); - } -} diff --git a/drivers/thirdparty/ice/ice_ptp.h b/drivers/thirdparty/ice/ice_ptp.h index c4ac0fa76523..1ed4f8cdf110 100644 --- a/drivers/thirdparty/ice/ice_ptp.h +++ b/drivers/thirdparty/ice/ice_ptp.h @@ -9,7 +9,7 @@ #include #include #include - +#include "kcompat_kthread.h" #include "ice_ptp_hw.h" enum ice_ptp_pin { @@ -20,6 +20,13 @@ enum ice_ptp_pin { NUM_ICE_PTP_PIN }; +/* Main timer mode */ +enum ice_src_tmr_mode { + ICE_SRC_TMR_MODE_NANOSECONDS, + ICE_SRC_TMR_MODE_LOCKED, + + NUM_ICE_SRC_TMR_MODE +}; #define ICE_E810T_SMA1_CTRL_MASK (ICE_E810T_P1_SMA1_DIR_EN | \ ICE_E810T_P1_SMA1_TX_EN) @@ -38,13 +45,26 @@ enum ice_e810t_ptp_pins { NUM_E810T_PTP_PINS }; -#define ICE_SUBDEV_ID_E810_T 0x000E +enum ice_phy_rclk_pins { + ICE_C827_RCLKA_PIN, /* SCL pin */ + ICE_C827_RCLKB_PIN, /* SDA pin */ + ICE_C827_RCLK_PINS_NUM /* number of pins */ +}; -static inline bool ice_is_e810t(struct ice_hw *hw) -{ - return (hw->device_id == ICE_DEV_ID_E810C_SFP && - hw->subsystem_device_id == ICE_SUBDEV_ID_E810_T); -} +#define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_C827_RCLK_PINS_NUM + \ + (_pin) + ZL_REF1P) + +#define E822_CGU_RCLK_PHY_PINS_NUM 1 +#define E822_CGU_RCLK_PIN_NAME "NAC_CLK_SYNCE0_PN" + +#define ICE_CGU_IN_PIN_FAIL_FLAGS (ICE_AQC_GET_CGU_IN_CFG_STATUS_SCM_FAIL | \ + ICE_AQC_GET_CGU_IN_CFG_STATUS_CFM_FAIL | \ + ICE_AQC_GET_CGU_IN_CFG_STATUS_GST_FAIL | \ + ICE_AQC_GET_CGU_IN_CFG_STATUS_PFM_FAIL) + +#define ICE_DPLL_PIN_STATE_INVALID "invalid" +#define ICE_DPLL_PIN_STATE_VALIDATING "validating" +#define ICE_DPLL_PIN_STATE_VALID "valid" struct ice_perout_channel { 
bool ena; @@ -53,6 +73,88 @@ struct ice_perout_channel { u64 start_time; }; +/* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp + * is stored in a buffer of registers. Depending on the specific hardware, + * this buffer might be shared across multiple PHY ports. + * + * On transmit of a packet to be timestamped, software is responsible for + * selecting an open index. Hardware makes no attempt to lock or prevent + * re-use of an index for multiple packets. + * + * To handle this, timestamp indexes must be tracked by software to ensure + * that an index is not re-used for multiple transmitted packets. The + * structures and functions declared in this file track the available Tx + * register indexes, as well as provide storage for the SKB pointers. + * + * To allow multiple ports to access the shared register block independently, + * the blocks are split up so that indexes are assigned to each port based on + * hardware logical port number. + */ + +/** + * struct ice_tx_tstamp - Tracking for a single Tx timestamp + * @skb: pointer to the SKB for this timestamp request + * @start: jiffies when the timestamp was first requested + * @cached_tstamp: last read timestamp + * + * This structure tracks a single timestamp request. The SKB pointer is + * provided when initiating a request. The start time is used to ensure that + * we discard old requests that were not fulfilled within a 2 second time + * window. + * Timestamp values in the PHY are read only and do not get cleared except at + * hardware reset or when a new timestamp value is captured. The cached_tstamp + * field is used to detect the case where a new timestamp has not yet been + * captured, ensuring that we avoid sending stale timestamp data to the stack. + */ +struct ice_tx_tstamp { + struct sk_buff *skb; + unsigned long start; + u64 cached_tstamp; +}; + +/** + * struct ice_ptp_tx - Tracking structure for Tx timestamp requests on a port + * @lock: lock to prevent concurrent access to in_use and unread bitmaps + * @tstamps: array of len to store outstanding requests + * @in_use: bitmap of len to indicate which slots are in use + * @unread: bitmap of len to indicate which slots haven't been read + * @block: which memory block (quad or port) the timestamps are captured in + * @offset: offset into timestamp block to get the real index + * @len: length of the tstamps and in_use fields. + * @init: if true, the tracker is initialized; + * @calibrating: if true, the PHY is calibrating the Tx offset. During this + * window, timestamps are temporarily disabled. + * @ll_ena: if true, the low latency timestamping feature is supported + * + * The in_use and unread bitmaps work in concert. The in_use bitmap indicates + * which slots are currently being used by hardware to capture a Tx timestamp. + * The unread bit indicates that a slot has not had its Tx timestamp read by + * software. Both bits should be set by software under lock when initiating + * a Tx timestamp request using a slot. The unread bit is used to ensure that + * only one thread reads the Tx timestamp registers. It should be tested and + * cleared under lock before reading the Tx timestamp. The in_use bit should + * be cleared under lock only after a timestamp has completed. The separation + * of the in_use and unread bits is required because we cannot hold the + * spinlock while reading the Tx timestamp register from firmware. 
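+ *
+ * A rough sketch of the intended flow (illustrative only; the real request
+ * path is ice_ptp_request_ts() and completion is handled from
+ * ice_ptp_tx_tstamp()):
+ *
+ *	spin_lock(&tx->lock);
+ *	idx = find_first_zero_bit(tx->in_use, tx->len);
+ *	if (idx < tx->len) {
+ *		set_bit(idx, tx->in_use);
+ *		set_bit(idx, tx->unread);
+ *		tx->tstamps[idx].skb = skb_get(skb);
+ *	}
+ *	spin_unlock(&tx->lock);
+ *
+ *	... later, once hardware reports the slot as ready ...
+ *
+ *	spin_lock(&tx->lock);
+ *	drop = !test_and_clear_bit(idx, tx->unread);
+ *	spin_unlock(&tx->lock);
+ *	if (!drop)
+ *		read the PHY timestamp register (the lock cannot be held here);
+ *	spin_lock(&tx->lock);
+ *	clear_bit(idx, tx->in_use);
+ *	spin_unlock(&tx->lock);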
+ */ +struct ice_ptp_tx { + spinlock_t lock; /* protects access to in_use bitmap */ + struct ice_tx_tstamp *tstamps; + unsigned long *in_use; + unsigned long *unread; + u8 block; + u8 offset; + u8 len; + u8 init; + u8 calibrating; + u8 ll_ena; +}; + +/* Quad and port information for initializing timestamp blocks */ +#define INDEX_PER_QUAD 64 +#define INDEX_PER_PORT_E822 16 +#define INDEX_PER_PORT_E810 64 +#define INDEX_PER_PORT_ETH56G 64 /** * struct ice_ptp_port - data used to initialize an external port for PTP @@ -61,22 +163,16 @@ struct ice_perout_channel { * ready for PTP functionality. It is used to track the port initialization * and determine when the port's PHY offset is valid. * - * @ov_task: work task for tracking when PHY offset is valid - * @tx_offset_ready: indicates the Tx offset for the port is ready - * @rx_offset_ready: indicates the Rx offset for the port is ready - * @tx_offset_lock: lock used to protect the tx_offset_ready field - * @rx_offset_lock: lock used to protect the rx_offset_ready field + * @tx: Tx timestamp tracking for this port + * @ov_work: delayed work task for tracking when PHY offset is valid * @ps_lock: mutex used to protect the overall PTP PHY start procedure * @link_up: indicates whether the link is up * @tx_fifo_busy_cnt: number of times the Tx FIFO was busy * @port_num: the port number this structure represents */ struct ice_ptp_port { - struct work_struct ov_task; - atomic_t tx_offset_ready; - atomic_t rx_offset_ready; - atomic_t tx_offset_lock; - atomic_t rx_offset_lock; + struct ice_ptp_tx tx; + struct kthread_delayed_work ov_work; struct mutex ps_lock; /* protects overall PTP PHY start procedure */ bool link_up; u8 tx_fifo_busy_cnt; @@ -88,40 +184,63 @@ struct ice_ptp_port { /** * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK * @port: data for the PHY port initialization procedure + * @work: delayed work function for periodic tasks + * @extts_work: work function for handling external Tx timestamps * @cached_phc_time: a cached copy of the PHC time for timestamp extension + * @cached_phc_jiffies: jiffies when cached_phc_time was last updated * @ext_ts_chan: the external timestamp channel in use * @ext_ts_irq: the external timestamp IRQ in use - * @phy_reset_lock: bit lock for preventing PHY start while resetting - * @ov_wq: work queue for the offset validity task + * @kworker: kwork thread for handling periodic work * @perout_channels: periodic output data * @info: structure defining PTP hardware capabilities * @clock: pointer to registered PTP clock device * @tstamp_config: hardware timestamping configuration - * @time_ref_freq: current device timer frequency (for E822 devices) + * @phy_kobj: pointer to phy sysfs object * @src_tmr_mode: current device timer mode (locked or nanoseconds) + * @reset_time: kernel time after clock stop on reset + * @tx_hwtstamp_skipped: number of Tx time stamp requests skipped + * @tx_hwtstamp_timeouts: number of Tx skbs discarded with no time stamp + * @tx_hwtstamp_flushed: number of Tx skbs flushed due to interface closed + * @tx_hwtstamp_discarded: number of Tx skbs discarded due to cached PHC time + * being too old to correctly extend timestamp + * @late_cached_phc_updates: number of times cached PHC update is late */ struct ice_ptp { struct ice_ptp_port port; + struct kthread_delayed_work work; + struct kthread_work extts_work; u64 cached_phc_time; + unsigned long cached_phc_jiffies; u8 ext_ts_chan; u8 ext_ts_irq; - atomic_t phy_reset_lock; - struct workqueue_struct *ov_wq; + struct 
kthread_worker *kworker; struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX]; struct ptp_clock_info info; struct ptp_clock *clock; struct hwtstamp_config tstamp_config; - enum ice_time_ref_freq time_ref_freq; + struct kobject *phy_kobj; enum ice_src_tmr_mode src_tmr_mode; + u64 reset_time; + u32 tx_hwtstamp_skipped; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_flushed; + u32 tx_hwtstamp_discarded; + u32 late_cached_phc_updates; }; -#define __ptp_port_to_ptp(p) \ - container_of((p), struct ice_ptp, port) +static inline struct ice_ptp *__ptp_port_to_ptp(struct ice_ptp_port *p) +{ + return container_of(p, struct ice_ptp, port); +} + #define ptp_port_to_pf(p) \ container_of(__ptp_port_to_ptp((p)), struct ice_pf, ptp) -#define __ptp_info_to_ptp(i) \ - container_of((i), struct ice_ptp, info) +static inline struct ice_ptp *__ptp_info_to_ptp(struct ptp_clock_info *i) +{ + return container_of(i, struct ice_ptp, info); +} + #define ptp_info_to_pf(i) \ container_of(__ptp_info_to_ptp((i)), struct ice_pf, ptp) @@ -138,9 +257,6 @@ struct ice_ptp { #define FIFO_EMPTY BIT(2) #define FIFO_OK 0xFF #define ICE_PTP_FIFO_NUM_CHECKS 5 -/* PHY, quad and port definitions */ -#define INDEX_PER_QUAD 64 -#define INDEX_PER_PORT (INDEX_PER_QUAD / ICE_PORTS_PER_QUAD) #define TX_INTR_QUAD_MASK 0x03 /* Per-channel register definitions */ #define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8)) @@ -157,34 +273,68 @@ struct ice_ptp { #define PPS_CLK_SRC_CHAN 2 #define PPS_PIN_INDEX 5 #define TIME_SYNC_PIN_INDEX 4 -#define E810_N_EXT_TS 3 -#define E810_N_PER_OUT 4 -#define E810T_N_PER_OUT 3 +#define N_EXT_TS_E810 3 +#define N_PER_OUT_E810 4 +#define N_PER_OUT_E810T 3 +#define N_PER_OUT_NO_SMA_E810T 2 +#define N_EXT_TS_NO_SMA_E810T 2 /* Macros to derive the low and high addresses for PHY */ #define LOWER_ADDR_SIZE 16 /* Macros to derive offsets for TimeStampLow and TimeStampHigh */ #define PORT_TIMER_ASSOC(_i) (0x0300102C + ((_i) * 256)) #define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4)) +#define MAX_PIN_NAME 15 + +#define ICE_PTP_PIN_FREQ_1HZ 1 +#define ICE_PTP_PIN_FREQ_10MHZ 10000000 + /* Time allowed for programming periodic clock output */ #define START_OFFS_NS 100000000 +#define ICE_PTP_PIN_INVALID 0xFF + +/* "dpll pin prio " (always 6 arguments) */ +#define ICE_PTP_PIN_PRIO_ARG_CNT 6 + +/* + * Examples of possible argument lists and count: + * "in pin enable <0/1>" + * "out pin enable <0/1> freq " + * "in pin freq " + * "out pin freq esync " + * "in pin freq phase_delay esync <0/1>" + * "out pin enable <0/1> freq phase_delay esync <0/1>" + * + * count = 3 + x * 2 + * 3 = target pin arguments ( pin ) + * x = int [1-4] (up to 4: 'param name' + 'value' pairs) + * 2 = count of args in pair ('param name' + 'value') + */ +#define ICE_PTP_PIN_CFG_1_ARG_CNT 5 +#define ICE_PTP_PIN_CFG_2_ARG_CNT 7 +#define ICE_PTP_PIN_CFG_3_ARG_CNT 9 +#define ICE_PTP_PIN_CFG_4_ARG_CNT 11 + #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) struct ice_pf; int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr); int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr); -int ice_ptp_get_ts_idx(struct ice_vsi *vsi); +void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena); int ice_get_ptp_clock_index(struct ice_pf *pf); -void ice_clean_ptp_subtask(struct ice_pf *pf); -void ice_ptp_set_timestamp_offsets(struct ice_pf *pf); +s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); +bool ice_ptp_process_ts(struct ice_pf *pf); + u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp 
*sts); void ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb); +void ice_ptp_reset(struct ice_pf *pf); +void ice_ptp_prepare_for_reset(struct ice_pf *pf); void ice_ptp_init(struct ice_pf *pf); void ice_ptp_release(struct ice_pf *pf); -int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup); +void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup); int ice_ptp_check_rx_fifo(struct ice_pf *pf, u8 port); int ptp_ts_enable(struct ice_pf *pf, u8 port, bool enable); int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, @@ -193,6 +343,7 @@ int ice_ptp_update_incval(struct ice_pf *pf, enum ice_time_ref_freq time_ref_fre enum ice_src_tmr_mode src_tmr_mode); int ice_ptp_get_incval(struct ice_pf *pf, enum ice_time_ref_freq *time_ref_freq, enum ice_src_tmr_mode *src_tmr_mode); +void ice_dpll_pin_idx_to_name(struct ice_pf *pf, u8 pin, char *pin_name); #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ static inline int ice_ptp_set_ts_config(struct ice_pf __always_unused *pf, struct ifreq __always_unused *ifr) @@ -206,6 +357,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf __always_unused *pf, return 0; } +static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { } static inline int ice_ptp_check_rx_fifo(struct ice_pf __always_unused *pf, u8 __always_unused port) @@ -213,9 +365,14 @@ ice_ptp_check_rx_fifo(struct ice_pf __always_unused *pf, return 0; } -static inline int ice_ptp_get_ts_idx(struct ice_vsi __always_unused *vsi) +static inline s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) { - return 0; + return -1; +} + +static inline bool ice_ptp_process_ts(struct ice_pf *pf) +{ + return true; } static inline int ice_get_ptp_clock_index(struct ice_pf __always_unused *pf) @@ -223,13 +380,15 @@ static inline int ice_get_ptp_clock_index(struct ice_pf __always_unused *pf) return 0; } static inline void ice_clean_ptp_subtask(struct ice_pf *pf) { } -static inline void ice_ptp_set_timestamp_offsets(struct ice_pf *pf) { } static inline void ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { } static inline void ice_ptp_init(struct ice_pf *pf) { } +static inline void ice_ptp_reset(struct ice_pf *pf) { } static inline void ice_ptp_release(struct ice_pf *pf) { } -static inline int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) -{ return 0; } +static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { } +static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) +{ +} #endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ #endif /* _ICE_PTP_H_ */ diff --git a/drivers/thirdparty/ice/ice_ptp_consts.h b/drivers/thirdparty/ice/ice_ptp_consts.h index 4c0d390e6276..10faa425cb22 100644 --- a/drivers/thirdparty/ice/ice_ptp_consts.h +++ b/drivers/thirdparty/ice/ice_ptp_consts.h @@ -82,4 +82,295 @@ const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { }, }; +const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { + /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */ + { + /* refclk_pre_div */ + 1, + /* feedback_div */ + 197, + /* frac_n_div */ + 2621440, + /* post_pll_div */ + 6, + }, + + /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */ + { + /* refclk_pre_div */ + 5, + /* feedback_div */ + 223, + /* frac_n_div */ + 524288, + /* post_pll_div */ + 7, + }, + + /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */ + { + /* refclk_pre_div */ + 5, + /* feedback_div */ + 223, + /* frac_n_div */ + 524288, + 
/* post_pll_div */ + 7, + }, + + /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */ + { + /* refclk_pre_div */ + 5, + /* feedback_div */ + 159, + /* frac_n_div */ + 1572864, + /* post_pll_div */ + 6, + }, + + /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */ + { + /* refclk_pre_div */ + 5, + /* feedback_div */ + 159, + /* frac_n_div */ + 1572864, + /* post_pll_div */ + 6, + }, + + /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */ + { + /* refclk_pre_div */ + 10, + /* feedback_div */ + 223, + /* frac_n_div */ + 524288, + /* post_pll_div */ + 7, + }, +}; + +/* + * struct ice_vernier_info_e822 + * + * E822 hardware calibrates the delay of the timestamp indication from the + * actual packet transmission or reception during the initialization of the + * PHY. To do this, the hardware mechanism uses some conversions between the + * various clocks within the PHY block. This table defines constants used to + * calculate the correct conversion ratios in the PHY registers. + * + * Many of the values relate to the PAR/PCS clock conversion registers. For + * these registers, a value of 0 means that the associated register is not + * used by this link speed, and that the register should be cleared by writing + * 0. Other values specify the clock frequency in Hz. + */ +const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = { + /* ICE_PTP_LNK_SPD_1G */ + { + /* tx_par_clk */ + 31250000, /* 31.25 MHz */ + /* rx_par_clk */ + 31250000, /* 31.25 MHz */ + /* tx_pcs_clk */ + 125000000, /* 125 MHz */ + /* rx_pcs_clk */ + 125000000, /* 125 MHz */ + /* tx_desk_rsgb_par */ + 0, /* unused */ + /* rx_desk_rsgb_par */ + 0, /* unused */ + /* tx_desk_rsgb_pcs */ + 0, /* unused */ + /* rx_desk_rsgb_pcs */ + 0, /* unused */ + /* tx_fixed_delay */ + 25140, + /* pmd_adj_divisor */ + 10000000, + /* rx_fixed_delay */ + 17372, + }, + /* ICE_PTP_LNK_SPD_10G */ + { + /* tx_par_clk */ + 257812500, /* 257.8125 MHz */ + /* rx_par_clk */ + 257812500, /* 257.8125 MHz */ + /* tx_pcs_clk */ + 156250000, /* 156.25 MHz */ + /* rx_pcs_clk */ + 156250000, /* 156.25 MHz */ + /* tx_desk_rsgb_par */ + 0, /* unused */ + /* rx_desk_rsgb_par */ + 0, /* unused */ + /* tx_desk_rsgb_pcs */ + 0, /* unused */ + /* rx_desk_rsgb_pcs */ + 0, /* unused */ + /* tx_fixed_delay */ + 6938, + /* pmd_adj_divisor */ + 82500000, + /* rx_fixed_delay */ + 6212, + }, + /* ICE_PTP_LNK_SPD_25G */ + { + /* tx_par_clk */ + 644531250, /* 644.53125 MHZ */ + /* rx_par_clk */ + 644531250, /* 644.53125 MHz */ + /* tx_pcs_clk */ + 390625000, /* 390.625 MHz */ + /* rx_pcs_clk */ + 390625000, /* 390.625 MHz */ + /* tx_desk_rsgb_par */ + 0, /* unused */ + /* rx_desk_rsgb_par */ + 0, /* unused */ + /* tx_desk_rsgb_pcs */ + 0, /* unused */ + /* rx_desk_rsgb_pcs */ + 0, /* unused */ + /* tx_fixed_delay */ + 2778, + /* pmd_adj_divisor */ + 206250000, + /* rx_fixed_delay */ + 2491, + }, + /* ICE_PTP_LNK_SPD_25G_RS */ + { + /* tx_par_clk */ + 0, /* unused */ + /* rx_par_clk */ + 0, /* unused */ + /* tx_pcs_clk */ + 0, /* unused */ + /* rx_pcs_clk */ + 0, /* unused */ + /* tx_desk_rsgb_par */ + 161132812, /* 162.1328125 MHz Reed Solomon gearbox */ + /* rx_desk_rsgb_par */ + 161132812, /* 162.1328125 MHz Reed Solomon gearbox */ + /* tx_desk_rsgb_pcs */ + 97656250, /* 97.62625 MHz Reed Solomon gearbox */ + /* rx_desk_rsgb_pcs */ + 97656250, /* 97.62625 MHz Reed Solomon gearbox */ + /* tx_fixed_delay */ + 3928, + /* pmd_adj_divisor */ + 206250000, + /* rx_fixed_delay */ + 29535, + }, + /* ICE_PTP_LNK_SPD_40G */ + { + /* tx_par_clk */ + 257812500, + /* rx_par_clk */ + 257812500, + /* 
tx_pcs_clk */ + 156250000, /* 156.25 MHz */ + /* rx_pcs_clk */ + 156250000, /* 156.25 MHz */ + /* tx_desk_rsgb_par */ + 0, /* unused */ + /* rx_desk_rsgb_par */ + 156250000, /* 156.25 MHz deskew clock */ + /* tx_desk_rsgb_pcs */ + 0, /* unused */ + /* rx_desk_rsgb_pcs */ + 156250000, /* 156.25 MHz deskew clock */ + /* tx_fixed_delay */ + 5666, + /* pmd_adj_divisor */ + 82500000, + /* rx_fixed_delay */ + 4244, + }, + /* ICE_PTP_LNK_SPD_50G */ + { + /* tx_par_clk */ + 644531250, /* 644.53125 MHZ */ + /* rx_par_clk */ + 644531250, /* 644.53125 MHZ */ + /* tx_pcs_clk */ + 390625000, /* 390.625 MHz */ + /* rx_pcs_clk */ + 390625000, /* 390.625 MHz */ + /* tx_desk_rsgb_par */ + 0, /* unused */ + /* rx_desk_rsgb_par */ + 195312500, /* 193.3125 MHz deskew clock */ + /* tx_desk_rsgb_pcs */ + 0, /* unused */ + /* rx_desk_rsgb_pcs */ + 195312500, /* 193.3125 MHz deskew clock */ + /* tx_fixed_delay */ + 2778, + /* pmd_adj_divisor */ + 206250000, + /* rx_fixed_delay */ + 2868, + }, + /* ICE_PTP_LNK_SPD_50G_RS */ + { + /* tx_par_clk */ + 0, /* unused */ + /* rx_par_clk */ + 644531250, /* 644.53125 MHz */ + /* tx_pcs_clk */ + 0, /* unused */ + /* rx_pcs_clk */ + 644531250, /* 644.53125 MHz */ + /* tx_desk_rsgb_par */ + 322265625, /* 322.265625 MHz Reed Solomon gearbox */ + /* rx_desk_rsgb_par */ + 322265625, /* 322.265625 MHz Reed Solomon gearbox */ + /* tx_desk_rsgb_pcs */ + 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + /* rx_desk_rsgb_pcs */ + 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + /* tx_fixed_delay */ + 2095, + /* pmd_adj_divisor */ + 206250000, + /* rx_fixed_delay */ + 14524, + }, + /* ICE_PTP_LNK_SPD_100G_RS */ + { + /* tx_par_clk */ + 0, /* unused */ + /* rx_par_clk */ + 644531250, /* 644.53125 MHz */ + /* tx_pcs_clk */ + 0, /* unused */ + /* rx_pcs_clk */ + 644531250, /* 644.53125 MHz */ + /* tx_desk_rsgb_par */ + 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + /* rx_desk_rsgb_par */ + 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + /* tx_desk_rsgb_pcs */ + 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + /* rx_desk_rsgb_pcs */ + 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + /* tx_fixed_delay */ + 1620, + /* pmd_adj_divisor */ + 206250000, + /* rx_fixed_delay */ + 7775, + }, +}; + #endif /* _ICE_PTP_CONSTS_H_ */ diff --git a/drivers/thirdparty/ice/ice_ptp_hw.c b/drivers/thirdparty/ice/ice_ptp_hw.c index 7e1c33bed88d..8e1555cdb08a 100644 --- a/drivers/thirdparty/ice/ice_ptp_hw.c +++ b/drivers/thirdparty/ice/ice_ptp_hw.c @@ -5,6 +5,7 @@ #include "ice_common.h" #include "ice_ptp_hw.h" #include "ice_ptp_consts.h" +#include "ice_cgu_regs.h" /* Low level functions for interacting with and managing the device clock used * for the Precision Time Protocol. @@ -99,7 +100,423 @@ u64 ice_ptp_read_src_incval(struct ice_hw *hw) return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo; } -/* E822 family functions +/** + * ice_read_cgu_reg_e822 - Read a CGU register + * @hw: pointer to the HW struct + * @addr: Register address to read + * @val: storage for register value read + * + * Read the contents of a register of the Clock Generation Unit. Only + * applicable to E822 devices. 
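+ *
+ * The access is performed over the device sideband queue: the CGU register
+ * address is packed into an ice_sbq_msg_input with dest_dev set to cgu, and
+ * the register contents come back in the message data field.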
+ */ +static int +ice_read_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 *val) +{ + struct ice_sbq_msg_input cgu_msg; + int status; + + cgu_msg.opcode = ice_sbq_msg_rd; + cgu_msg.dest_dev = cgu; + cgu_msg.msg_addr_low = addr; + cgu_msg.msg_addr_high = 0x0; + + status = ice_sbq_rw_reg_lp(hw, &cgu_msg, true); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, status %d\n", + addr, status); + return status; + } + + *val = cgu_msg.data; + + return 0; +} + +/** + * ice_write_cgu_reg_e822 - Write a CGU register + * @hw: pointer to the HW struct + * @addr: Register address to write + * @val: value to write into the register + * + * Write the specified value to a register of the Clock Generation Unit. Only + * applicable to E822 devices. + */ +static int +ice_write_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 val) +{ + struct ice_sbq_msg_input cgu_msg; + int status; + + cgu_msg.opcode = ice_sbq_msg_wr; + cgu_msg.dest_dev = cgu; + cgu_msg.msg_addr_low = addr; + cgu_msg.msg_addr_high = 0x0; + cgu_msg.data = val; + + status = ice_sbq_rw_reg_lp(hw, &cgu_msg, true); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, status %d\n", + addr, status); + return status; + } + + return 0; +} + +/** + * ice_clk_freq_str - Convert time_ref_freq to string + * @clk_freq: Clock frequency + * + * Convert the specified TIME_REF clock frequency to a string. + */ +static const char *ice_clk_freq_str(u8 clk_freq) +{ + switch ((enum ice_time_ref_freq)clk_freq) { + case ICE_TIME_REF_FREQ_25_000: + return "25 MHz"; + case ICE_TIME_REF_FREQ_122_880: + return "122.88 MHz"; + case ICE_TIME_REF_FREQ_125_000: + return "125 MHz"; + case ICE_TIME_REF_FREQ_153_600: + return "153.6 MHz"; + case ICE_TIME_REF_FREQ_156_250: + return "156.25 MHz"; + case ICE_TIME_REF_FREQ_245_760: + return "245.76 MHz"; + default: + return "Unknown"; + } +} + +/** + * ice_clk_src_str - Convert time_ref_src to string + * @clk_src: Clock source + * + * Convert the specified clock source to its string name. + */ +static const char *ice_clk_src_str(u8 clk_src) +{ + switch ((enum ice_clk_src)clk_src) { + case ICE_CLK_SRC_TCX0: + return "TCX0"; + case ICE_CLK_SRC_TIME_REF: + return "TIME_REF"; + default: + return "Unknown"; + } +} + +/** + * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit + * @hw: pointer to the HW struct + * @clk_freq: Clock frequency to program + * @clk_src: Clock source to select (TIME_REF, or TCX0) + * + * Configure the Clock Generation Unit with the desired clock frequency and + * time reference, enabling the PLL which drives the PTP hardware clock. 
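+ *
+ * The programming sequence implemented below is, in short:
+ *   1. disable the TS PLL if it is currently enabled,
+ *   2. select the new TIME_REF frequency (NAC_CGU_DWORD9),
+ *   3. program the feedback, post and pre divisors from e822_cgu_params,
+ *   4. select the clock source and re-enable the PLL (NAC_CGU_DWORD24),
+ *   5. wait at least a millisecond and check plllock_true_lock_cri to
+ *      confirm the PLL locked.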
+ */ +int +ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, + enum ice_clk_src clk_src) +{ + union tspll_ro_bwm_lf bwm_lf; + union nac_cgu_dword19 dw19; + union nac_cgu_dword22 dw22; + union nac_cgu_dword24 dw24; + union nac_cgu_dword9 dw9; + int status; + + if (clk_freq >= NUM_ICE_TIME_REF_FREQ) { + dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n", + clk_freq); + return -EINVAL; + } + + if (clk_src >= NUM_ICE_CLK_SRC) { + dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n", + clk_src); + return -EINVAL; + } + + if (clk_src == ICE_CLK_SRC_TCX0 && + clk_freq != ICE_TIME_REF_FREQ_25_000) { + dev_warn(ice_hw_to_dev(hw), + "TCX0 only supports 25 MHz frequency\n"); + return -EINVAL; + } + + status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val); + if (status) + return status; + + status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val); + if (status) + return status; + + status = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + if (status) + return status; + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + dw24.field.ts_pll_enable ? "enabled" : "disabled", + ice_clk_src_str(dw24.field.time_ref_sel), + ice_clk_freq_str(dw9.field.time_ref_freq_sel), + bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked"); + + /* Disable the PLL before changing the clock source or frequency */ + if (dw24.field.ts_pll_enable) { + dw24.field.ts_pll_enable = 0; + + status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + if (status) + return status; + } + + /* Set the frequency */ + dw9.field.time_ref_freq_sel = clk_freq; + status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val); + if (status) + return status; + + /* Configure the TS PLL feedback divisor */ + status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val); + if (status) + return status; + + dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div; + dw19.field.tspll_ndivratio = 1; + + status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val); + if (status) + return status; + + /* Configure the TS PLL post divisor */ + status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val); + if (status) + return status; + + dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div; + dw22.field.time1588clk_sel_div2 = 0; + + status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val); + if (status) + return status; + + /* Configure the TS PLL pre divisor and clock source */ + status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val); + if (status) + return status; + + dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div; + dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div; + dw24.field.time_ref_sel = clk_src; + + status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + if (status) + return status; + + /* Finally, enable the PLL */ + dw24.field.ts_pll_enable = 1; + + status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + if (status) + return status; + + /* Wait to verify if the PLL locks */ + msleep(1); + + status = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + if (status) + return status; + + if (!bwm_lf.field.plllock_true_lock_cri) { + dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n"); + return -EBUSY; + } + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + dw24.field.ts_pll_enable ? 
"enabled" : "disabled", + ice_clk_src_str(dw24.field.time_ref_sel), + ice_clk_freq_str(dw9.field.time_ref_freq_sel), + bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked"); + + return 0; +} + +/** + * ice_init_cgu_e822 - Initialize CGU with settings from firmware + * @hw: pointer to the HW structure + * + * Initialize the Clock Generation Unit of the E822 device. + */ +static int ice_init_cgu_e822(struct ice_hw *hw) +{ + struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info; + union tspll_cntr_bist_settings cntr_bist; + int status; + + status = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS, + &cntr_bist.val); + if (status) + return status; + + /* Disable sticky lock detection so lock status reported is accurate */ + cntr_bist.field.i_plllock_sel_0 = 0; + cntr_bist.field.i_plllock_sel_1 = 0; + + status = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS, + cntr_bist.val); + if (status) + return status; + + /* Configure the CGU PLL using the parameters from the function + * capabilities. + */ + status = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref, + (enum ice_clk_src)ts_info->clk_src); + if (status) + return status; + + return 0; +} + +/** + * ice_ptp_cgu_err_reporting - Enable/disable error reporting for CGU + * @hw: pointer to HW struct + * @enable: true if reporting should be enabled + * + * Enable or disable error events to be reported through Admin Queue. + * + * Return: 0 on success, error code otherwise + */ +static int ice_ptp_cgu_err_reporting(struct ice_hw *hw, bool enable) +{ + int status; + + status = ice_aq_cfg_cgu_err(hw, enable, enable, NULL); + if (status) { + ice_debug(hw, ICE_DBG_PTP, + "Failed to %s CGU error reporting, status %d\n", + enable ? "enable" : "disable", status); + return status; + } + + return 0; +} + +/** + * ice_ptp_process_cgu_err - Handle reported CGU error + * @hw: pointer to HW struct + * @event: reported CGU error descriptor + */ +void ice_ptp_process_cgu_err(struct ice_hw *hw, struct ice_rq_event_info *event) +{ + u8 err_type = event->desc.params.cgu_err.err_type; + + if (err_type | ICE_AQC_CGU_ERR_SYNCE_LOCK_LOSS) + dev_warn(ice_hw_to_dev(hw), "SyncE lock lost\n"); + + if (err_type | ICE_AQC_CGU_ERR_HOLDOVER_CHNG) + dev_warn(ice_hw_to_dev(hw), "SyncE holdover change\n"); + if (err_type | ICE_AQC_CGU_ERR_TIMESYNC_LOCK_LOSS) { + dev_warn(ice_hw_to_dev(hw), + "TimeSync PLL lock lost. Retrying to acquire lock with default PLL configuration.\n"); + ice_init_cgu_e822(hw); + } + + /* Reenable CGU error reporting */ + ice_ptp_cgu_err_reporting(hw, true); +} + +/** + * ice_ptp_src_cmd - Prepare source timer for a timer command + * @hw: pointer to HW structure + * @cmd: Timer command + * + * Prepare the source timer for an upcoming timer sync command. 
+ */ +void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +{ + u32 cmd_val; + u8 tmr_idx; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + cmd_val = tmr_idx << SEL_CPK_SRC; + + switch (cmd) { + case ICE_PTP_INIT_TIME: + cmd_val |= GLTSYN_CMD_INIT_TIME; + break; + case ICE_PTP_INIT_INCVAL: + cmd_val |= GLTSYN_CMD_INIT_INCVAL; + break; + case ICE_PTP_ADJ_TIME: + cmd_val |= GLTSYN_CMD_ADJ_TIME; + break; + case ICE_PTP_ADJ_TIME_AT_TIME: + cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME; + break; + case ICE_PTP_READ_TIME: + cmd_val |= GLTSYN_CMD_READ_TIME; + break; + case ICE_PTP_NOP: + break; + default: + dev_warn(ice_hw_to_dev(hw), "Unknown timer command %u\n", cmd); + return; + } + + wr32(hw, GLTSYN_CMD, cmd_val); +} + +/** + * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands + * @hw: pointer to HW struct + * + * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the + * write immediately. This triggers the hardware to begin executing all of the + * source and PHY timer commands synchronously. + */ +static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) +{ + wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); + ice_flush(hw); +} + +/** + * ice_ptp_clean_cmd - Clean the timer command register + * @hw: pointer to HW struct + * + * Zero out the GLTSYN_CMD to avoid any residual command execution. + */ +static void ice_ptp_clean_cmd(struct ice_hw *hw) +{ + wr32(hw, GLTSYN_CMD, 0); + ice_flush(hw); +} + +/** + * ice_ptp_init_phy_cfg - Get the current TX timestamp status + * mask. Returns the mask of ports where TX timestamps are available + * @hw: pointer to the HW struct + */ +int +ice_ptp_init_phy_cfg(struct ice_hw *hw) +{ + + if (ice_is_e810(hw)) + hw->phy_cfg = ICE_PHY_E810; + else + hw->phy_cfg = ICE_PHY_E822; + + return 0; +} + +/* ---------------------------------------------------------------------------- + * E822 family functions * * The following functions operate on the E822 family of devices. */ @@ -115,9 +532,9 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) { int phy_port, phy, quadtype; - phy_port = port % ICE_PORTS_PER_PHY; - phy = port / ICE_PORTS_PER_PHY; - quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE; + phy_port = port % ICE_PORTS_PER_PHY_E822; + phy = port / ICE_PORTS_PER_PHY_E822; + quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822; if (quadtype == 0) { msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port); @@ -135,6 +552,104 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) msg->dest_dev = rmn_2; } +/** + * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register + * @low_addr: the low address to check + * @high_addr: on return, contains the high address of the 64bit register + * + * Checks if the provided low address is one of the known 64bit PHY values + * represented as two 32bit registers. If it is, return the appropriate high + * register offset to use. 
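+ *
+ * The 64bit read and write helpers below rely on this lookup so that both
+ * 32bit halves of such a value are always accessed together.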
+ */ +static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr) +{ + switch (low_addr) { + case P_REG_PAR_PCS_TX_OFFSET_L: + *high_addr = P_REG_PAR_PCS_TX_OFFSET_U; + return true; + case P_REG_PAR_PCS_RX_OFFSET_L: + *high_addr = P_REG_PAR_PCS_RX_OFFSET_U; + return true; + case P_REG_PAR_TX_TIME_L: + *high_addr = P_REG_PAR_TX_TIME_U; + return true; + case P_REG_PAR_RX_TIME_L: + *high_addr = P_REG_PAR_RX_TIME_U; + return true; + case P_REG_TOTAL_TX_OFFSET_L: + *high_addr = P_REG_TOTAL_TX_OFFSET_U; + return true; + case P_REG_TOTAL_RX_OFFSET_L: + *high_addr = P_REG_TOTAL_RX_OFFSET_U; + return true; + case P_REG_UIX66_10G_40G_L: + *high_addr = P_REG_UIX66_10G_40G_U; + return true; + case P_REG_UIX66_25G_100G_L: + *high_addr = P_REG_UIX66_25G_100G_U; + return true; + case P_REG_TX_CAPTURE_L: + *high_addr = P_REG_TX_CAPTURE_U; + return true; + case P_REG_RX_CAPTURE_L: + *high_addr = P_REG_RX_CAPTURE_U; + return true; + case P_REG_TX_TIMER_INC_PRE_L: + *high_addr = P_REG_TX_TIMER_INC_PRE_U; + return true; + case P_REG_RX_TIMER_INC_PRE_L: + *high_addr = P_REG_RX_TIMER_INC_PRE_U; + return true; + default: + return false; + } +} + +/** + * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register + * @low_addr: the low address to check + * @high_addr: on return, contains the high address of the 40bit value + * + * Checks if the provided low address is one of the known 40bit PHY values + * split into two registers with the lower 8 bits in the low register and the + * upper 32 bits in the high register. If it is, return the appropriate high + * register offset to use. + */ +static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) +{ + switch (low_addr) { + case P_REG_TIMETUS_L: + *high_addr = P_REG_TIMETUS_U; + return true; + case P_REG_PAR_RX_TUS_L: + *high_addr = P_REG_PAR_RX_TUS_U; + return true; + case P_REG_PAR_TX_TUS_L: + *high_addr = P_REG_PAR_TX_TUS_U; + return true; + case P_REG_PCS_RX_TUS_L: + *high_addr = P_REG_PCS_RX_TUS_U; + return true; + case P_REG_PCS_TX_TUS_L: + *high_addr = P_REG_PCS_TX_TUS_U; + return true; + case P_REG_DESK_PAR_RX_TUS_L: + *high_addr = P_REG_DESK_PAR_RX_TUS_U; + return true; + case P_REG_DESK_PAR_TX_TUS_L: + *high_addr = P_REG_DESK_PAR_TX_TUS_U; + return true; + case P_REG_DESK_PCS_RX_TUS_L: + *high_addr = P_REG_DESK_PCS_RX_TUS_U; + return true; + case P_REG_DESK_PCS_TX_TUS_L: + *high_addr = P_REG_DESK_PCS_TX_TUS_U; + return true; + default: + return false; + } +} + /** * ice_read_phy_reg_e822_lp - Read a PHY register * @hw: pointer to the HW struct @@ -145,13 +660,12 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) * * Read a PHY register for the given port over the device sideband queue. 
*/ -static enum ice_status +static int ice_read_phy_reg_e822_lp(struct ice_hw *hw, u8 port, u16 offset, u32 *val, bool lock_sbq) { struct ice_sbq_msg_input msg = {0}; - enum ice_status status; - + int status; ice_fill_phy_msg_e822(&msg, port, offset); msg.opcode = ice_sbq_msg_rd; @@ -168,12 +682,106 @@ ice_read_phy_reg_e822_lp(struct ice_hw *hw, u8 port, u16 offset, u32 *val, return 0; } -enum ice_status +int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) { return ice_read_phy_reg_e822_lp(hw, port, offset, val, true); } +/** + * ice_read_40b_phy_reg_e822 - Read a 40bit value from PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: on return, the contents of the 40bit value from the PHY registers + * + * Reads the two registers associated with a 40bit value and returns it in the + * val pointer. The offset always specifies the lower register offset to use. + * The high offset is looked up. This function only operates on registers + * known to be split into a lower 8 bit chunk and an upper 32 bit chunk. + */ +static int +ice_read_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) +{ + u32 low, high; + u16 high_addr; + int status; + + /* Only operate on registers known to be split into two 32bit + * registers. + */ + if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) { + ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", + low_addr); + return -EINVAL; + } + + status = ice_read_phy_reg_e822(hw, port, low_addr, &low); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, status %d", + low_addr, status); + return status; + } + + status = ice_read_phy_reg_e822(hw, port, high_addr, &high); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, status %d", + high_addr, status); + return status; + } + + *val = (u64)high << P_REG_40B_HIGH_S | (low & P_REG_40B_LOW_M); + + return 0; +} + +/** + * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: on return, the contents of the 64bit value from the PHY registers + * + * Reads the two registers associated with a 64bit value and returns it in the + * val pointer. The offset always specifies the lower register offset to use. + * The high offset is looked up. This function only operates on registers + * known to be two parts of a 64bit value. + */ +static int +ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) +{ + u32 low, high; + u16 high_addr; + int status; + + /* Only operate on registers known to be split into two 32bit + * registers. 
+ */ + if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) { + ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", + low_addr); + return -EINVAL; + } + + status = ice_read_phy_reg_e822(hw, port, low_addr, &low); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, status %d", + low_addr, status); + return status; + } + + status = ice_read_phy_reg_e822(hw, port, high_addr, &high); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, status %d", + high_addr, status); + return status; + } + + *val = (u64)high << 32 | low; + + return 0; +} + /** * ice_write_phy_reg_e822_lp - Write a PHY register * @hw: pointer to the HW struct @@ -184,13 +792,12 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) * * Write a PHY register for the given port over the device sideband queue. */ -static enum ice_status +static int ice_write_phy_reg_e822_lp(struct ice_hw *hw, u8 port, u16 offset, u32 val, bool lock_sbq) { struct ice_sbq_msg_input msg = {0}; - enum ice_status status; - + int status; ice_fill_phy_msg_e822(&msg, port, offset); msg.opcode = ice_sbq_msg_wr; @@ -206,12 +813,106 @@ ice_write_phy_reg_e822_lp(struct ice_hw *hw, u8 port, u16 offset, u32 val, return 0; } -enum ice_status +int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) { return ice_write_phy_reg_e822_lp(hw, port, offset, val, true); } +/** + * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY + * @hw: pointer to the HW struct + * @port: port to write to + * @low_addr: offset of the low register + * @val: 40b value to write + * + * Write the provided 40b value to the two associated registers by splitting + * it up into two chunks, the lower 8 bits and the upper 32 bits. + */ +static int +ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) +{ + u32 low, high; + u16 high_addr; + int status; + + /* Only operate on registers known to be split into a lower 8 bit + * register and an upper 32 bit register. + */ + if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) { + ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n", + low_addr); + return -EINVAL; + } + + low = (u32)(val & P_REG_40B_LOW_M); + high = (u32)(val >> P_REG_40B_HIGH_S); + + status = ice_write_phy_reg_e822(hw, port, low_addr, low); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, status %d", + low_addr, status); + return status; + } + + status = ice_write_phy_reg_e822(hw, port, high_addr, high); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, status %d", + high_addr, status); + return status; + } + + return 0; +} + +/** + * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: the contents of the 64bit value to write to PHY + * + * Write the 64bit value to the two associated 32bit PHY registers. The offset + * is always specified as the lower register, and the high address is looked + * up. This function only operates on registers known to be two parts of + * a 64bit value. + */ +static int +ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) +{ + u32 low, high; + u16 high_addr; + int status; + + /* Only operate on registers known to be split into two 32bit + * registers. 
+ */ + if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) { + ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", + low_addr); + return -EINVAL; + } + + low = lower_32_bits(val); + high = upper_32_bits(val); + + status = ice_write_phy_reg_e822(hw, port, low_addr, low); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, status %d", + low_addr, status); + return status; + } + + status = ice_write_phy_reg_e822(hw, port, high_addr, high); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, status %d", + high_addr, status); + return status; + } + + return 0; +} + /** * ice_fill_quad_msg_e822 - Fill message data for quad register access * @msg: the PHY message buffer to fill in @@ -221,20 +922,25 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) * Fill a message buffer for accessing a register in a quad shared between * multiple PHYs. */ -static void +static int ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) { u32 addr; + if (quad >= ICE_MAX_QUAD) + return -EINVAL; + msg->dest_dev = rmn_0; - if ((quad % ICE_NUM_QUAD_TYPE) == 0) + if ((quad % ICE_QUADS_PER_PHY_E822) == 0) addr = Q_0_BASE + offset; else addr = Q_1_BASE + offset; msg->msg_addr_low = ICE_LO_WORD(addr); msg->msg_addr_high = ICE_HI_WORD(addr); + + return 0; } /** @@ -248,32 +954,31 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) * Read a quad register over the device sideband queue. Quad registers are * shared between multiple PHYs. */ -static enum ice_status +static int ice_read_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 *val, bool lock_sbq) { struct ice_sbq_msg_input msg = {0}; - enum ice_status status; + int status; - if (quad >= ICE_MAX_QUAD) - return ICE_ERR_PARAM; + status = ice_fill_quad_msg_e822(&msg, quad, offset); + if (status) + goto exit_err; - ice_fill_quad_msg_e822(&msg, quad, offset); msg.opcode = ice_sbq_msg_rd; status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq); - if (status) { +exit_err: + if (status) ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n", status); - return status; - } + else + *val = msg.data; - *val = msg.data; - - return 0; + return status; } -enum ice_status +int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) { return ice_read_quad_reg_e822_lp(hw, quad, offset, val, true); @@ -290,31 +995,30 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) * Write a quad register over the device sideband queue. Quad registers are * shared between multiple PHYs. 
*/ -static enum ice_status +static int ice_write_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 val, bool lock_sbq) { struct ice_sbq_msg_input msg = {0}; - enum ice_status status; + int status; - if (quad >= ICE_MAX_QUAD) - return ICE_ERR_PARAM; + status = ice_fill_quad_msg_e822(&msg, quad, offset); + if (status) + goto exit_err; - ice_fill_quad_msg_e822(&msg, quad, offset); msg.opcode = ice_sbq_msg_wr; msg.data = val; status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq); - if (status) { +exit_err: + if (status) ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n", status); - return status; - } - return 0; + return status; } -enum ice_status +int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) { return ice_write_quad_reg_e822_lp(hw, quad, offset, val, true); @@ -331,11 +1035,11 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) * quad memory block that is shared between the internal PHYs of the E822 * family of devices. */ -static enum ice_status +static int ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) { - enum ice_status status; u16 lo_addr, hi_addr; + int status; u32 lo, hi; lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx); @@ -365,81 +1069,157 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) } /** - * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block + * ice_clear_phy_tstamp_e822 - Drop a timestamp from the quad block * @hw: pointer to the HW struct * @quad: the quad to read from * @idx: the timestamp index to reset * - * Clear a timestamp, resetting its valid bit, from the PHY quad block that is - * shared between the internal PHYs on the E822 devices. + * Read the timetamp out of the quad to clear its timestamp status bit from + * the PHY quad block that is shared between the internal PHYs of the E822 + * devices. + * + * Note that software cannot directly write the quad memory bank registers, + * and must use ice_ptp_reset_ts_memory_quad_e822 for that purpose. + * + * This function should only be called on an idx whose bit is set according to + * ice_get_phy_tx_tstamp_ready. */ -static enum ice_status +static int ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx) { - enum ice_status status; - u16 lo_addr, hi_addr; + u64 unused_tstamp; + int status; - lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx); - hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx); - - status = ice_write_quad_reg_e822(hw, quad, lo_addr, 0); + status = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp); if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n", - status); - return status; - } - - status = ice_write_quad_reg_e822(hw, quad, hi_addr, 0); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n", - status); + ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, status %d\n", + quad, idx, status); return status; } return 0; } +/** + * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block + * @hw: pointer to the HW struct + * @quad: the quad to read from + * + * Clear all timestamps from the PHY quad block that is shared between the + * internal PHYs on the E822 devices. 
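+ *
+ * This is done by pulsing the memory reset bit: Q_REG_TS_CTRL_M is written
+ * to Q_REG_TS_CTRL and then cleared again, as the body below shows.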
+ */ +void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad) +{ + ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); + ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); +} + +/** + * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks + * @hw: pointer to the HW struct + */ +static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw) +{ + unsigned int quad; + + for (quad = 0; quad < ICE_MAX_QUAD; quad++) { + ice_ptp_reset_ts_memory_quad_e822(hw, quad); + } +} + +/** + * ice_ptp_set_vernier_wl - Set the window length for vernier calibration + * @hw: pointer to the HW struct + * + * Set the window length used for the vernier port calibration process. + */ +int ice_ptp_set_vernier_wl(struct ice_hw *hw) +{ + u8 port; + + for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + int status; + + status = ice_write_phy_reg_e822_lp(hw, port, P_REG_WL, + PTP_VERNIER_WL, true); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, status %d\n", + port, status); + return status; + } + } + + return 0; +} + +/** + * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization + * @hw: pointer to HW struct + * + * Perform PHC initialization steps specific to E822 devices. + */ +static int ice_ptp_init_phc_e822(struct ice_hw *hw) +{ + int status; + u32 regval; + + /* Enable reading switch and PHY registers over the sideband queue */ +#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1) +#define PF_SB_REM_DEV_CTL_PHY0 BIT(2) + regval = rd32(hw, PF_SB_REM_DEV_CTL); + regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ | + PF_SB_REM_DEV_CTL_PHY0); + wr32(hw, PF_SB_REM_DEV_CTL, regval); + + /* Initialize the Clock Generation Unit */ + status = ice_init_cgu_e822(hw); + if (status) + return status; + + /* Enable CGU error reporting */ + status = ice_ptp_cgu_err_reporting(hw, true); + if (status) + return status; + + /* Set window length for all the ports */ + return ice_ptp_set_vernier_wl(hw); +} + /** * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time * @hw: pointer to the HW struct * @time: Time to initialize the PHY port clocks to * * Program the PHY port registers with a new initial time value. The port - * clock will be initialized once the driver issues an INIT_TIME sync + * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync * command. The time value is the upper 32 bits of the PHY timer, usually in * units of nominal nanoseconds. */ -static enum ice_status +static int ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time) { - enum ice_status status; + u64 phy_time; + int status; u8 port; + /* The time represents the upper 32 bits of the PHY timer, so we need + * to shift to account for this when programming. 
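+ * For example, a @time value of 0x00000010 is written to the TIMER_INC_PRE
+ * registers as the 64bit value 0x0000001000000000.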
+ */ + phy_time = (u64)time << 32; + for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { /* Tx case */ - status = ice_write_phy_reg_e822_lp(hw, port, - P_REG_TX_TIMER_INC_PRE_L, - 0, true); - if (status) - goto exit_err; - - status = ice_write_phy_reg_e822_lp(hw, port, - P_REG_TX_TIMER_INC_PRE_U, - time, true); + status = ice_write_64b_phy_reg_e822(hw, port, + P_REG_TX_TIMER_INC_PRE_L, + phy_time); if (status) goto exit_err; /* Rx case */ - status = ice_write_phy_reg_e822_lp(hw, port, - P_REG_RX_TIMER_INC_PRE_L, - 0, true); - if (status) - goto exit_err; - - status = ice_write_phy_reg_e822_lp(hw, port, - P_REG_RX_TIMER_INC_PRE_U, - time, true); + status = ice_write_64b_phy_reg_e822(hw, port, + P_REG_RX_TIMER_INC_PRE_L, + phy_time); if (status) goto exit_err; } @@ -463,19 +1243,19 @@ exit_err: * * Program the port for an atomic adjustment by writing the Tx and Rx timer * registers. The atomic adjustment won't be completed until the driver issues - * an ADJ_TIME command. + * an ICE_PTP_ADJ_TIME command. * * Note that time is not in units of nanoseconds. It is in clock time * including the lower sub-nanosecond portion of the port timer. * * Negative adjustments are supported using 2s complement arithmetic. */ -enum ice_status +int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time, bool lock_sbq) { - enum ice_status status; u32 l_time, u_time; + int status; l_time = lower_32_bits(time); u_time = upper_32_bits(time); @@ -519,9 +1299,9 @@ exit_err: * * Prepare the PHY ports for an atomic time adjustment by programming the PHY * Tx and Rx port registers. The actual adjustment is completed by issuing an - * ADJ_TIME or ADJ_TIME_AT_TIME sync command. + * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command. */ -static enum ice_status +static int ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj, bool lock_sbq) { s64 cycles; @@ -537,8 +1317,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj, bool lock_sbq) cycles = -(((s64)-adj) << 32); for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - enum ice_status status; - + int status; status = ice_ptp_prep_port_adj_e822(hw, port, cycles, lock_sbq); @@ -554,32 +1333,19 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj, bool lock_sbq) * @hw: pointer to HW struct * @incval: new increment value to prepare * - * Prepare each of the PHY ports for a new increment value by programming each + * Prepare each of the PHY ports for a new increment value by programming the * port's TIMETUS registers. The new increment value will be updated after - * issuing an INIT_INCVAL command. + * issuing an ICE_PTP_INIT_INCVAL command. */ -static enum ice_status +static int ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval) { - enum ice_status status; - u32 high, low; + int status; u8 port; - /* The PHY registers for the increment value divide the lower 8 bits - * into the first low register, and the next 32 bits into the second - * high register. - */ - low = (u32)(incval & P_REG_TIMETUS_LOW_M); - high = (u32)(incval >> P_REG_TIMETUS_HIGH_S); - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - status = ice_write_phy_reg_e822_lp(hw, port, P_REG_TIMETUS_L, - low, true); - if (status) - goto exit_err; - - status = ice_write_phy_reg_e822_lp(hw, port, P_REG_TIMETUS_U, - high, true); + status = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, + incval); if (status) goto exit_err; } @@ -601,30 +1367,20 @@ exit_err: * * Read the time_clk_cyc increment value for a given PHY port. 
*/ -enum ice_status +int ice_ptp_read_phy_incval_e822(struct ice_hw *hw, u8 port, u64 *incval) { - enum ice_status status; - u32 high, low; + int status; - status = ice_read_phy_reg_e822_lp(hw, port, P_REG_TIMETUS_L, - &low, true); + status = ice_read_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval); if (status) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TIMETUS_L, status %d\n", status); return status; } - status = ice_read_phy_reg_e822_lp(hw, port, P_REG_TIMETUS_U, - &high, true); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read TIMETUS_U, status %d\n", - status); - return status; - } - - *incval = high << P_REG_TIMETUS_HIGH_S | (low & P_REG_TIMETUS_LOW_M); - ice_debug(hw, ICE_DBG_PTP, "read INCVAL = 0x%08x%08x\n", high, low); + ice_debug(hw, ICE_DBG_PTP, "read INCVAL = 0x%016llx\n", + (unsigned long long)*incval); return 0; } @@ -635,17 +1391,17 @@ ice_ptp_read_phy_incval_e822(struct ice_hw *hw, u8 port, u64 *incval) * @target_time: target time to program * * Program the PHY port Tx and Rx TIMER_CNT_ADJ registers used for the - * ADJ_TIME_AT_TIME command. This should be used in conjunction with + * ICE_PTP_ADJ_TIME_AT_TIME command. This should be used in conjunction with * ice_ptp_prep_phy_adj_e822 to program an atomic adjustment that is * delayed until a specified target time. * * Note that a target time adjustment is not currently supported on E810 * devices. */ -static enum ice_status +static int ice_ptp_prep_phy_adj_target_e822(struct ice_hw *hw, u32 target_time) { - enum ice_status status; + int status; u8 port; for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { @@ -689,7 +1445,7 @@ exit_err: } /** - * ice_ptp_read_port_capture - Read a port's local time capture + * ice_ptp_read_port_capture_e822 - Read a port's local time capture * @hw: pointer to HW struct * @port: Port number to read * @tx_ts: on return, the Tx port time capture @@ -699,57 +1455,39 @@ exit_err: * * Note this has no equivalent for the E810 devices. 
*/ -enum ice_status -ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) +int +ice_ptp_read_port_capture_e822(struct ice_hw *hw, u8 port, u64 *tx_ts, + u64 *rx_ts) { - enum ice_status status; - u32 high, low; + int status; /* Tx case */ - status = ice_read_phy_reg_e822_lp(hw, port, P_REG_TX_CAPTURE_L, - &low, true); + status = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts); if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE_L, status %d\n", + ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, status %d\n", status); return status; } - status = ice_read_phy_reg_e822_lp(hw, port, P_REG_TX_CAPTURE_U, - &high, true); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_CAPTURE_U, status %d\n", - status); - return status; - } - - *tx_ts = (u64)high << 32 | low; - ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n", *tx_ts); + ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n", + (unsigned long long)*tx_ts); /* Rx case */ - status = ice_read_phy_reg_e822_lp(hw, port, P_REG_RX_CAPTURE_L, - &low, true); + status = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts); if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE_L, status %d\n", + ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, status %d\n", status); return status; } - status = ice_read_phy_reg_e822_lp(hw, port, P_REG_RX_CAPTURE_U, - &high, true); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE_U, status %d\n", - status); - return status; - } - - *rx_ts = (u64)high << 32 | low; - ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n", *rx_ts); + ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n", + (unsigned long long)*rx_ts); return 0; } /** - * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command + * ice_ptp_one_port_cmd_e822 - Prepare a single PHY port for a timer command * @hw: pointer to HW struct * @port: Port to which cmd has to be sent * @cmd: Command to be sent to the port @@ -760,35 +1498,35 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) * Note there is no equivalent of this operation on E810, as that device * always handles all external PHYs internally. */ -enum ice_status -ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd, - bool lock_sbq) +int +ice_ptp_one_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd, + bool lock_sbq) { - enum ice_status status; u32 cmd_val, val; + int status; u8 tmr_idx; tmr_idx = ice_get_ptp_src_clock_index(hw); cmd_val = tmr_idx << SEL_PHY_SRC; switch (cmd) { - case INIT_TIME: + case ICE_PTP_INIT_TIME: cmd_val |= PHY_CMD_INIT_TIME; break; - case INIT_INCVAL: + case ICE_PTP_INIT_INCVAL: cmd_val |= PHY_CMD_INIT_INCVAL; break; - case ADJ_TIME: + case ICE_PTP_ADJ_TIME: cmd_val |= PHY_CMD_ADJ_TIME; break; - case ADJ_TIME_AT_TIME: + case ICE_PTP_ADJ_TIME_AT_TIME: cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME; break; - case READ_TIME: + case ICE_PTP_READ_TIME: cmd_val |= PHY_CMD_READ_TIME; break; default: dev_warn(ice_hw_to_dev(hw), "Unknown timer command %u\n", cmd); - return ICE_ERR_PARAM; + return -EINVAL; } /* Tx case */ @@ -847,17 +1585,16 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd, * Prepare all ports connected to this device for an upcoming timer sync * command. 
*/ -static enum ice_status +static int ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd, bool lock_sbq) { u8 port; for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - enum ice_status status; + int status; - - status = ice_ptp_one_port_cmd(hw, port, cmd, lock_sbq); + status = ice_ptp_one_port_cmd_e822(hw, port, cmd, lock_sbq); if (status) return status; } @@ -872,32 +1609,6 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd, * port. */ -/** - * ice_ptp_set_vernier_wl - Set the window length for vernier calibration - * @hw: pointer to the HW struct - * - * Set the window length used for the vernier port calibration process. - */ -enum ice_status ice_ptp_set_vernier_wl(struct ice_hw *hw) -{ - u8 port; - - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - enum ice_status status; - - - status = ice_write_phy_reg_e822_lp(hw, port, P_REG_WL, - PTP_VERNIER_WL, true); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, status %d\n", - port, status); - return status; - } - } - - return 0; -} - /** * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode * @hw: pointer to HW struct @@ -908,14 +1619,14 @@ enum ice_status ice_ptp_set_vernier_wl(struct ice_hw *hw) * Read the serdes data for the PHY port and extract the link speed and FEC * algorithm. */ -enum ice_status +int ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, enum ice_ptp_link_spd *link_out, enum ice_ptp_fec_mode *fec_out) { enum ice_ptp_link_spd link; enum ice_ptp_fec_mode fec; - enum ice_status status; + int status; u32 serdes; status = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes); @@ -942,7 +1653,7 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, link = ICE_PTP_LNK_SPD_100G_RS; break; default: - return ICE_ERR_OUT_OF_RANGE; + return -EIO; } } else { switch (serdes) { @@ -962,7 +1673,7 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, link = ICE_PTP_LNK_SPD_50G; break; default: - return ICE_ERR_OUT_OF_RANGE; + return -EIO; } } @@ -982,11 +1693,9 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) { enum ice_ptp_link_spd link_spd; - enum ice_status status; - int quad; + int status; u32 val; - - quad = port / ICE_PORTS_PER_QUAD; + u8 quad; status = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL); if (status) { @@ -995,6 +1704,8 @@ void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) return; } + quad = port / ICE_PORTS_PER_QUAD; + status = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); if (status) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, status %d\n", @@ -1015,6 +1726,1021 @@ void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) } } +/** + * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822 + * @hw: pointer to the HW structure + * @port: the port to configure + * + * Program the conversion ration of Serdes clock "unit intervals" (UIs) to PHC + * hardware clock time units (TUs). That is, determine the number of TUs per + * serdes unit interval, and program the UIX registers with this conversion. + * + * This conversion is used as part of the calibration process when determining + * the additional error of a timestamp vs the real time of transmission or + * receipt of the packet. + * + * Hardware uses the number of TUs per 66 UIs, written to the UIX registers + * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks. 
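+ * (At the nominal 10.3125 GBd and 25.78125 GBd serdes rates, 66 UIs works
+ * out to 6.4 ns and 2.56 ns respectively, which is where the LINE_UI_10G_40G
+ * and LINE_UI_25G_100G values used below come from.)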
+ * + * To calculate the conversion ratio, we use the following facts: + * + * a) the clock frequency in Hz (cycles per second) + * b) the number of TUs per cycle (the increment value of the clock) + * c) 1 second per 1 billion nanoseconds + * d) the duration of 66 UIs in nanoseconds + * + * Given these facts, we can use the following table to work out what ratios + * to multiply in order to get the number of TUs per 66 UIs: + * + * cycles | 1 second | incval (TUs) | nanoseconds + * -------+--------------+--------------+------------- + * second | 1 billion ns | cycle | 66 UIs + * + * To perform the multiplication using integers without too much loss of + * precision, we can take use the following equation: + * + * (freq * incval * 6600 LINE_UI ) / ( 100 * 1 billion) + * + * We scale up to using 6600 UI instead of 66 in order to avoid fractional + * nanosecond UIs (66 UI at 10G/40G is 6.4 ns) + * + * The increment value has a maximum expected range of about 34 bits, while + * the frequency value is about 29 bits. Multiplying these values shouldn't + * overflow the 64 bits. However, we must then further multiply them again by + * the Serdes unit interval duration. To avoid overflow here, we split the + * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and + * a divide by 390,625,000. This does lose some precision, but avoids + * miscalculation due to arithmetic overflow. + */ +static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) +{ + u64 cur_freq, clk_incval, tu_per_sec, uix; + int status; + + cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + clk_incval = ice_ptp_read_src_incval(hw); + + /* Calculate TUs per second divided by 256 */ + tu_per_sec = (cur_freq * clk_incval) >> 8; + +#define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */ +#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */ + + /* Program the 10Gb/40Gb conversion ratio */ + uix = div64_u64(tu_per_sec * LINE_UI_10G_40G, 390625000); + + status = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L, + uix); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, status %d\n", + status); + return status; + } + + /* Program the 25Gb/100Gb conversion ratio */ + uix = div64_u64(tu_per_sec * LINE_UI_25G_100G, 390625000); + + status = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L, + uix); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, status %d\n", + status); + return status; + } + + return 0; +} + +/** + * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle + * @hw: pointer to the HW struct + * @port: port to configure + * + * Configure the number of TUs for the PAR and PCS clocks used as part of the + * timestamp calibration process. This depends on the link speed, as the PHY + * uses different markers depending on the speed. + * + * 1Gb/10Gb/25Gb: + * - Tx/Rx PAR/PCS markers + * + * 25Gb RS: + * - Tx/Rx Reed Solomon gearbox PAR/PCS markers + * + * 40Gb/50Gb: + * - Tx/Rx PAR/PCS markers + * - Rx Deskew PAR/PCS markers + * + * 50G RS and 100GB RS: + * - Tx/Rx Reed Solomon gearbox PAR/PCS markers + * - Rx Deskew PAR/PCS markers + * - Tx PAR/PCS markers + * + * To calculate the conversion, we use the PHC clock frequency (cycles per + * second), the increment value (TUs per cycle), and the related PHY clock + * frequency to calculate the TUs per unit of the PHY link clock. 
The + * following table shows how the units convert: + * + * cycles | TUs | second + * -------+-------+-------- + * second | cycle | cycles + * + * For each conversion register, look up the appropriate frequency from the + * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program + * this to the appropriate register, preparing hardware to perform timestamp + * calibration to calculate the total Tx or Rx offset to adjust the timestamp + * in order to calibrate for the internal PHY delays. + * + * Note that the increment value ranges up to ~34 bits, and the clock + * frequency is ~29 bits, so multiplying them together should fit within the + * 64 bit arithmetic. + */ +static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) +{ + u64 cur_freq, clk_incval, tu_per_sec, phy_tus; + enum ice_ptp_link_spd link_spd; + enum ice_ptp_fec_mode fec_mode; + int status; + + status = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + if (status) + return status; + + cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + clk_incval = ice_ptp_read_src_incval(hw); + + /* Calculate TUs per cycle of the PHC clock */ + tu_per_sec = cur_freq * clk_incval; + + /* For each PHY conversion register, look up the appropriate link + * speed frequency and determine the TUs per that clock's cycle time. + * Split this into a high and low value and then program the + * appropriate register. If that link speed does not use the + * associated register, write zeros to clear it instead. + */ + + /* P_REG_PAR_TX_TUS */ + if (e822_vernier[link_spd].tx_par_clk) + phy_tus = div64_u64(tu_per_sec, + e822_vernier[link_spd].tx_par_clk); + else + phy_tus = 0; + + status = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L, + phy_tus); + if (status) + return status; + + /* P_REG_PAR_RX_TUS */ + if (e822_vernier[link_spd].rx_par_clk) + phy_tus = div64_u64(tu_per_sec, + e822_vernier[link_spd].rx_par_clk); + else + phy_tus = 0; + + status = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L, + phy_tus); + if (status) + return status; + + /* P_REG_PCS_TX_TUS */ + if (e822_vernier[link_spd].tx_pcs_clk) + phy_tus = div64_u64(tu_per_sec, + e822_vernier[link_spd].tx_pcs_clk); + else + phy_tus = 0; + + status = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L, + phy_tus); + if (status) + return status; + + /* P_REG_PCS_RX_TUS */ + if (e822_vernier[link_spd].rx_pcs_clk) + phy_tus = div64_u64(tu_per_sec, + e822_vernier[link_spd].rx_pcs_clk); + else + phy_tus = 0; + + status = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L, + phy_tus); + if (status) + return status; + + /* P_REG_DESK_PAR_TX_TUS */ + if (e822_vernier[link_spd].tx_desk_rsgb_par) + phy_tus = div64_u64(tu_per_sec, + e822_vernier[link_spd].tx_desk_rsgb_par); + else + phy_tus = 0; + + status = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L, + phy_tus); + if (status) + return status; + + /* P_REG_DESK_PAR_RX_TUS */ + if (e822_vernier[link_spd].rx_desk_rsgb_par) + phy_tus = div64_u64(tu_per_sec, + e822_vernier[link_spd].rx_desk_rsgb_par); + else + phy_tus = 0; + + status = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L, + phy_tus); + if (status) + return status; + + /* P_REG_DESK_PCS_TX_TUS */ + if (e822_vernier[link_spd].tx_desk_rsgb_pcs) + phy_tus = div64_u64(tu_per_sec, + e822_vernier[link_spd].tx_desk_rsgb_pcs); + else + phy_tus = 0; + + status = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L, + phy_tus); + if (status) + return status; + + /* P_REG_DESK_PCS_RX_TUS */ + if 
(e822_vernier[link_spd].rx_desk_rsgb_pcs) + phy_tus = div64_u64(tu_per_sec, + e822_vernier[link_spd].rx_desk_rsgb_pcs); + else + phy_tus = 0; + + return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L, + phy_tus); +} + +/** + * ice_calc_fixed_tx_offset_e822 - Calculated Fixed Tx offset for a port + * @hw: pointer to the HW struct + * @link_spd: the Link speed to calculate for + * + * Calculate the fixed offset due to known static latency data. + */ +static u64 +ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) +{ + u64 cur_freq, clk_incval, tu_per_sec, fixed_offset; + + cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + clk_incval = ice_ptp_read_src_incval(hw); + + /* Calculate TUs per second */ + tu_per_sec = cur_freq * clk_incval; + + /* Calculate number of TUs to add for the fixed Tx latency. Since the + * latency measurement is in 1/100th of a nanosecond, we need to + * multiply by tu_per_sec and then divide by 1e11. This calculation + * overflows 64 bit integer arithmetic, so break it up into two + * divisions by 1e4 first then by 1e7. + */ + fixed_offset = div64_u64(tu_per_sec, 10000); + fixed_offset *= e822_vernier[link_spd].tx_fixed_delay; + fixed_offset = div64_u64(fixed_offset, 10000000); + + return fixed_offset; +} + +/** + * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset + * @hw: pointer to the HW struct + * @port: the PHY port to configure + * + * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to + * adjust Tx timestamps by. This is calculated by combining some known static + * latency along with the Vernier offset computations done by hardware. + * + * This function must be called only after the offset registers are valid, + * i.e. after the Vernier calibration wait has passed, to ensure that the PHY + * has measured the offset. + * + * To avoid overflow, when calculating the offset based on the known static + * latency values, we use measurements in 1/100th of a nanosecond, and divide + * the TUs per second up front. This avoids overflow while allowing + * calculation of the adjustment using integer arithmetic. + */ +int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) +{ + enum ice_ptp_link_spd link_spd; + enum ice_ptp_fec_mode fec_mode; + u64 total_offset, val; + int status; + + status = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + if (status) + return status; + + total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd); + + /* Read the first Vernier offset from the PHY register and add it to + * the total offset. + */ + if (link_spd == ICE_PTP_LNK_SPD_1G || + link_spd == ICE_PTP_LNK_SPD_10G || + link_spd == ICE_PTP_LNK_SPD_25G || + link_spd == ICE_PTP_LNK_SPD_25G_RS || + link_spd == ICE_PTP_LNK_SPD_40G || + link_spd == ICE_PTP_LNK_SPD_50G) { + status = ice_read_64b_phy_reg_e822(hw, port, + P_REG_PAR_PCS_TX_OFFSET_L, + &val); + if (status) + return status; + + total_offset += val; + } + + /* For Tx, we only need to use the second Vernier offset for + * multi-lane link speeds with RS-FEC. The lanes will always be + * aligned. + */ + if (link_spd == ICE_PTP_LNK_SPD_50G_RS || + link_spd == ICE_PTP_LNK_SPD_100G_RS) { + status = ice_read_64b_phy_reg_e822(hw, port, + P_REG_PAR_TX_TIME_L, + &val); + if (status) + return status; + + total_offset += val; + } + + /* Now that the total offset has been calculated, program it to the + * PHY and indicate that the Tx offset is ready. After this, + * timestamps will be enabled. 
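+ * The ready indication is the write of 1 to P_REG_TX_OR just below;
+ * ice_stop_phy_timer_e822 clears it again whenever the port is
+ * re-calibrated.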
+ */ + status = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L, + total_offset); + if (status) + return status; + + status = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1); + if (status) + return status; + + return 0; +} + +/** + * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx + * @hw: pointer to the HW struct + * @port: the PHY port to adjust for + * @link_spd: the current link speed of the PHY + * @fec_mode: the current FEC mode of the PHY + * @pmd_adj: on return, the amount to adjust the Rx total offset by + * + * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY. + * This varies by link speed and FEC mode. The value calculated accounts for + * various delays caused when receiving a packet. + */ +static int +ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, + enum ice_ptp_link_spd link_spd, + enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj) +{ + u64 cur_freq, clk_incval, tu_per_sec, mult, adj; + u32 pmd_adj_divisor, val; + u8 pmd_align; + int status; + + status = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, status %d\n", + status); + return status; + } + + pmd_align = (u8)val; + + cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + clk_incval = ice_ptp_read_src_incval(hw); + + /* Calculate TUs per second */ + tu_per_sec = cur_freq * clk_incval; + + /* Get the link speed dependent PMD adjustment divisor */ + pmd_adj_divisor = e822_vernier[link_spd].pmd_adj_divisor; + + /* The PMD alignment adjustment measurement depends on the link speed, + * and whether FEC is enabled. For each link speed, the alignment + * adjustment is calculated by dividing a value by the length of + * a Time Unit in nanoseconds. + * + * 1G: align == 4 ? 10 * 0.8 : (align + 6 % 10) * 0.8 + * 10G: align == 65 ? 0 : (align * 0.1 * 32/33) + * 10G w/FEC: align * 0.1 * 32/33 + * 25G: align == 65 ? 0 : (align * 0.4 * 32/33) + * 25G w/FEC: align * 0.4 * 32/33 + * 40G: align == 65 ? 0 : (align * 0.1 * 32/33) + * 40G w/FEC: align * 0.1 * 32/33 + * 50G: align == 65 ? 0 : (align * 0.4 * 32/33) + * 50G w/FEC: align * 0.8 * 32/33 + * + * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33. + * + * To allow for calculating this value using integer arithmetic, we + * instead start with the number of TUs per second, (inverse of the + * length of a Time Unit in nanoseconds), multiply by a value based + * on the PMD alignment register, and then divide by the right value + * calculated based on the table above. To avoid integer overflow this + * division is broken up into a step of dividing by 125 first. 
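+ * The remaining division uses the link speed's pmd_adj_divisor taken from
+ * the e822_vernier table, as the calculation below shows.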
+ */ + if (link_spd == ICE_PTP_LNK_SPD_1G) { + if (pmd_align == 4) + mult = 10; + else + mult = (pmd_align + 6) % 10; + } else if (link_spd == ICE_PTP_LNK_SPD_10G || + link_spd == ICE_PTP_LNK_SPD_25G || + link_spd == ICE_PTP_LNK_SPD_40G || + link_spd == ICE_PTP_LNK_SPD_50G) { + /* If Clause 74 FEC, always calculate PMD adjust */ + if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74) + mult = pmd_align; + else + mult = 0; + } else if (link_spd == ICE_PTP_LNK_SPD_25G_RS || + link_spd == ICE_PTP_LNK_SPD_50G_RS || + link_spd == ICE_PTP_LNK_SPD_100G_RS) { + if (pmd_align < 17) + mult = pmd_align + 40; + else + mult = pmd_align; + } else { + ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n", + link_spd); + mult = 0; + } + + /* In some cases, there's no need to adjust for the PMD alignment */ + if (!mult) { + *pmd_adj = 0; + return 0; + } + + /* Calculate the adjustment by multiplying TUs per second by the + * appropriate multiplier and divisor. To avoid overflow, we first + * divide by 125, and then handle remaining divisor based on the link + * speed pmd_adj_divisor value. + */ + adj = div64_u64(tu_per_sec, 125); + adj *= mult; + adj = div64_u64(adj, pmd_adj_divisor); + + /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx + * cycle count is necessary. + */ + if (link_spd == ICE_PTP_LNK_SPD_25G_RS) { + u64 cycle_adj; + u8 rx_cycle; + + status = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT, + &val); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, status %d\n", + status); + return status; + } + + rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M; + if (rx_cycle) { + mult = (4 - rx_cycle) * 40; + + cycle_adj = div64_u64(tu_per_sec, 125); + cycle_adj *= mult; + cycle_adj = div64_u64(cycle_adj, pmd_adj_divisor); + + adj += cycle_adj; + } + } else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) { + u64 cycle_adj; + u8 rx_cycle; + + status = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT, + &val); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, status %d\n", + status); + return status; + } + + rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M; + if (rx_cycle) { + mult = rx_cycle * 40; + + cycle_adj = div64_u64(tu_per_sec, 125); + cycle_adj *= mult; + cycle_adj = div64_u64(cycle_adj, pmd_adj_divisor); + + adj += cycle_adj; + } + } + + /* Return the calculated adjustment */ + *pmd_adj = adj; + + return 0; +} + +/** + * ice_calc_fixed_rx_offset_e822 - Calculated the fixed Rx offset for a port + * @hw: pointer to HW struct + * @link_spd: The Link speed to calculate for + * + * Determine the fixed Rx latency for a given link speed. + */ +static u64 +ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) +{ + u64 cur_freq, clk_incval, tu_per_sec, fixed_offset; + + cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + clk_incval = ice_ptp_read_src_incval(hw); + + /* Calculate TUs per second */ + tu_per_sec = cur_freq * clk_incval; + + /* Calculate number of TUs to add for the fixed Rx latency. Since the + * latency measurement is in 1/100th of a nanosecond, we need to + * multiply by tu_per_sec and then divide by 1e11. This calculation + * overflows 64 bit integer arithmetic, so break it up into two + * divisions by 1e4 first then by 1e7. 
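+ * Since 1e4 * 1e7 = 1e11, the result matches a single divide by 1e11,
+ * apart from a small truncation error in the intermediate step.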
+ */ + fixed_offset = div64_u64(tu_per_sec, 10000); + fixed_offset *= e822_vernier[link_spd].rx_fixed_delay; + fixed_offset = div64_u64(fixed_offset, 10000000); + + return fixed_offset; +} + +/** + * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset + * @hw: pointer to the HW struct + * @port: the PHY port to configure + * + * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to + * adjust Rx timestamps by. This combines calculations from the Vernier offset + * measurements taken in hardware with some data about known fixed delay as + * well as adjusting for multi-lane alignment delay. + * + * This function must be called only after the offset registers are valid, + * i.e. after the Vernier calibration wait has passed, to ensure that the PHY + * has measured the offset. + * + * To avoid overflow, when calculating the offset based on the known static + * latency values, we use measurements in 1/100th of a nanosecond, and divide + * the TUs per second up front. This avoids overflow while allowing + * calculation of the adjustment using integer arithmetic. + */ +int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) +{ + enum ice_ptp_link_spd link_spd; + enum ice_ptp_fec_mode fec_mode; + u64 total_offset, pmd, val; + int status; + + status = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + if (status) + return status; + + total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd); + + /* Read the first Vernier offset from the PHY register and add it to + * the total offset. + */ + status = ice_read_64b_phy_reg_e822(hw, port, + P_REG_PAR_PCS_RX_OFFSET_L, + &val); + if (status) + return status; + + total_offset += val; + + /* For Rx, all multi-lane link speeds include a second Vernier + * calibration, because the lanes might not be aligned. + */ + if (link_spd == ICE_PTP_LNK_SPD_40G || + link_spd == ICE_PTP_LNK_SPD_50G || + link_spd == ICE_PTP_LNK_SPD_50G_RS || + link_spd == ICE_PTP_LNK_SPD_100G_RS) { + status = ice_read_64b_phy_reg_e822(hw, port, + P_REG_PAR_RX_TIME_L, + &val); + if (status) + return status; + + total_offset += val; + } + + /* In addition, Rx must account for the PMD alignment */ + status = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd); + if (status) + return status; + + /* For RS-FEC, this adjustment adds delay, but for other modes, it + * subtracts delay. + */ + if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC) + total_offset += pmd; + else + total_offset -= pmd; + + /* Now that the total offset has been calculated, program it to the + * PHY and indicate that the Rx offset is ready. After this, + * timestamps will be enabled. + */ + status = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L, + total_offset); + if (status) + return status; + + status = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1); + if (status) + return status; + + return 0; +} + +/** + * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time + * @hw: pointer to the HW struct + * @port: the PHY port to read + * @phy_time: on return, the 64bit PHY timer value + * @phc_time: on return, the lower 64bits of PHC time + * + * Issue a ICE_PTP_READ_TIME timer command to simultaneously capture the PHY + * and PHC timer values. 
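+ *
+ * Both captures are triggered by the same SYNC_EXEC_CMD write, so the two
+ * values correspond to the same instant of time.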
+ */ +static int +ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, + u64 *phc_time) +{ + u64 tx_time, rx_time; + int status; + u32 zo, lo; + u8 tmr_idx; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + + /* Prepare the PHC timer for a ICE_PTP_READ_TIME capture command */ + ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); + + /* Prepare the PHY timer for a ICE_PTP_READ_TIME capture command */ + status = ice_ptp_one_port_cmd_e822(hw, port, ICE_PTP_READ_TIME, true); + if (status) + return status; + + /* Issue the sync to start the ICE_PTP_READ_TIME capture */ + ice_ptp_exec_tmr_cmd(hw); + ice_ptp_clean_cmd(hw); + + /* Read the captured PHC time from the shadow time registers */ + zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); + lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); + *phc_time = (u64)lo << 32 | zo; + + /* Read the captured PHY time from the PHY shadow registers */ + status = ice_ptp_read_port_capture_e822(hw, port, &tx_time, &rx_time); + if (status) + return status; + + /* If the PHY Tx and Rx timers don't match, log a warning message. + * Note that this should not happen in normal circumstances since the + * driver always programs them together. + */ + if (tx_time != rx_time) + dev_warn(ice_hw_to_dev(hw), + "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n", + port, (unsigned long long)tx_time, + (unsigned long long)rx_time); + + *phy_time = tx_time; + + return 0; +} + +/** + * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer + * @hw: pointer to the HW struct + * @port: the PHY port to synchronize + * + * Perform an adjustment to ensure that the PHY and PHC timers are in sync. + * This is done by issuing a ICE_PTP_READ_TIME command which triggers a + * simultaneous read of the PHY timer and PHC timer. Then we use the + * difference to calculate an appropriate 2s complement addition to add + * to the PHY timer in order to ensure it reads the same value as the + * primary PHC timer. + */ +static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) +{ + u64 phc_time, phy_time, difference; + int status; + + if (!ice_ptp_lock(hw)) { + ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n"); + return -EBUSY; + } + + status = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, + &phc_time); + if (status) + goto err_unlock; + + /* Calculate the amount required to add to the port time in order for + * it to match the PHC time. + * + * Note that the port adjustment is done using 2s complement + * arithmetic. This is convenient since it means that we can simply + * calculate the difference between the PHC time and the port time, + * and it will be interpreted correctly. + */ + difference = phc_time - phy_time; + + status = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference, true); + if (status) + goto err_unlock; + + status = ice_ptp_one_port_cmd_e822(hw, port, ICE_PTP_ADJ_TIME, true); + if (status) + goto err_unlock; + + /* Init PHC mstr/src cmd for exec during sync */ + ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); + + /* Issue the sync to activate the time adjustment */ + ice_ptp_exec_tmr_cmd(hw); + ice_ptp_clean_cmd(hw); + + /* Re-capture the timer values to flush the command registers and + * verify that the time was properly adjusted. 
+ */ + status = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time); + if (status) + goto err_unlock; + + dev_info(ice_hw_to_dev(hw), + "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n", + port, (unsigned long long)phy_time, + (unsigned long long)phc_time); + + ice_ptp_unlock(hw); + + return 0; + +err_unlock: + ice_ptp_unlock(hw); + return status; +} + +/** + * ice_stop_phy_timer_e822 - Stop the PHY clock timer + * @hw: pointer to the HW struct + * @port: the PHY port to stop + * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS + * + * Stop the clock of a PHY port. This must be done as part of the flow to + * re-calibrate Tx and Rx timestamping offsets whenever the clock time is + * initialized or when link speed changes. + */ +int +ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset) +{ + int status; + u32 val; + + status = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0); + if (status) + return status; + + status = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0); + if (status) + return status; + + status = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); + if (status) + return status; + + val &= ~P_REG_PS_START_M; + status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (status) + return status; + + val &= ~P_REG_PS_ENA_CLK_M; + status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (status) + return status; + + if (soft_reset) { + val |= P_REG_PS_SFT_RESET_M; + status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (status) + return status; + } + + ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port); + + return 0; +} + +/** + * ice_start_phy_timer_e822 - Start the PHY clock timer + * @hw: pointer to the HW struct + * @port: the PHY port to start + * + * Start the clock of a PHY port. This must be done as part of the flow to + * re-calibrate Tx and Rx timestamping offsets whenever the clock time is + * initialized or when link speed changes. + * + * Hardware will take Vernier measurements on Tx or Rx of packets. 
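+ *
+ * Roughly, the body below stops the port timer, programs the lane, UIX and
+ * PAR/PCS conversions, copies the source timer increment into TIMETUS,
+ * issues ICE_PTP_INIT_INCVAL, pulses the port soft reset and sets the start
+ * bit, enables the port clock, and finally re-synchronizes the port timer to
+ * the PHC.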
+ */ +int +ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) +{ + u32 lo, hi, val; + int status; + u64 incval; + u8 tmr_idx; + + ice_ptp_clean_cmd(hw); + tmr_idx = ice_get_ptp_src_clock_index(hw); + + status = ice_stop_phy_timer_e822(hw, port, false); + if (status) + return status; + + ice_phy_cfg_lane_e822(hw, port); + + status = ice_phy_cfg_uix_e822(hw, port); + if (status) + return status; + + status = ice_phy_cfg_parpcs_e822(hw, port); + if (status) + return status; + + lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); + hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); + incval = (u64)hi << 32 | lo; + + status = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval); + if (status) + return status; + + status = ice_ptp_one_port_cmd_e822(hw, port, ICE_PTP_INIT_INCVAL, true); + if (status) + return status; + + /* Init PHC mstr/src cmd for exec during sync */ + ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); + + ice_ptp_exec_tmr_cmd(hw); + + status = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); + if (status) + return status; + + val |= P_REG_PS_SFT_RESET_M; + status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (status) + return status; + + val |= P_REG_PS_START_M; + status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (status) + return status; + + val &= ~P_REG_PS_SFT_RESET_M; + status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (status) + return status; + + status = ice_ptp_one_port_cmd_e822(hw, port, ICE_PTP_INIT_INCVAL, true); + if (status) + return status; + + ice_ptp_exec_tmr_cmd(hw); + + val |= P_REG_PS_ENA_CLK_M; + status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (status) + return status; + + val |= P_REG_PS_LOAD_OFFSET_M; + status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (status) + return status; + + ice_ptp_exec_tmr_cmd(hw); + + status = ice_sync_phy_timer_e822(hw, port); + if (status) + return status; + + ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port); + + return 0; +} + +/** + * ice_phy_calc_vernier_e822 - Perform vernier calculations + * @hw: pointer to the HW struct + * @port: the PHY port to configure + * + * Perform vernier calculations for the Tx and Rx offset. This will enable + * hardware to include the more precise offset calibrations, + * increasing precision of the generated timestamps. + * + * This cannot be done until hardware has measured the offsets, which requires + * waiting until at least one packet has been sent and received by the device. 
+ */ +int ice_phy_calc_vernier_e822(struct ice_hw *hw, u8 port) +{ + int status; + u32 val; + + status = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &val); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, status %d\n", + port, status); + return status; + } + + if (!(val & P_REG_TX_OV_STATUS_OV_M)) { + ice_debug(hw, ICE_DBG_PTP, "Tx offset is not yet valid for port %u\n", + port); + return -EBUSY; + } + + status = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &val); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, status %d\n", + port, status); + return status; + } + + if (!(val & P_REG_TX_OV_STATUS_OV_M)) { + ice_debug(hw, ICE_DBG_PTP, "Rx offset is not yet valid for port %u\n", + port); + return -EBUSY; + } + + status = ice_phy_cfg_tx_offset_e822(hw, port); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to program total Tx offset for port %u, status %d\n", + port, status); + return status; + } + + status = ice_phy_cfg_rx_offset_e822(hw, port); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to program total Rx offset for port %u, status %d\n", + port, status); + return status; + } + + return 0; +} + +/** + * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register + * @hw: pointer to the HW struct + * @quad: the timestamp quad to read from + * @tstamp_ready: contents of the Tx memory status register + * + * Read the Q_REG_TX_MEMORY_STATUS register indicating which timestamps in + * the PHY are ready. A set bit means the corresponding timestamp is valid and + * ready to be captured from the PHY timestamp block. + */ +static int +ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready) +{ + int status; + u32 hi, lo; + + status = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, + &hi); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, status %d\n", + quad, status); + return status; + } + + status = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, + &lo); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, status %d\n", + quad, status); + return status; + } + + *tstamp_ready = (u64)hi << 32 | (u64)lo; + + return 0; +} + /* E810 functions * * The following functions operate on the E810 series devices which use @@ -1030,11 +2756,11 @@ void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) * * Read a register from the external PHY on the E810 device. */ -static enum ice_status +static int ice_read_phy_reg_e810_lp(struct ice_hw *hw, u32 addr, u32 *val, bool lock_sbq) { struct ice_sbq_msg_input msg = {0}; - enum ice_status status; + int status; msg.msg_addr_low = ICE_LO_WORD(addr); msg.msg_addr_high = ICE_HI_WORD(addr); @@ -1053,7 +2779,7 @@ ice_read_phy_reg_e810_lp(struct ice_hw *hw, u32 addr, u32 *val, bool lock_sbq) return 0; } -static enum ice_status +static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val) { return ice_read_phy_reg_e810_lp(hw, addr, val, true); @@ -1068,11 +2794,11 @@ ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val) * * Write a value to a register of the external PHY on the E810 device. 
*/ -static enum ice_status +static int ice_write_phy_reg_e810_lp(struct ice_hw *hw, u32 addr, u32 val, bool lock_sbq) { struct ice_sbq_msg_input msg = {0}; - enum ice_status status; + int status; msg.msg_addr_low = ICE_LO_WORD(addr); msg.msg_addr_high = ICE_HI_WORD(addr); @@ -1090,12 +2816,93 @@ ice_write_phy_reg_e810_lp(struct ice_hw *hw, u32 addr, u32 val, bool lock_sbq) return 0; } -static enum ice_status +static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val) { return ice_write_phy_reg_e810_lp(hw, addr, val, true); } +/** + * ice_read_phy_tstamp_ll_e810 - Read a PHY timestamp registers through the FW + * @hw: pointer to the HW struct + * @idx: the timestamp index to read + * @hi: 8 bit timestamp high value + * @lo: 32 bit timestamp low value + * + * Read a 8bit timestamp high value and 32 bit timestamp low value out of the + * timestamp block of the external PHY on the E810 device using the low latency + * timestamp read. + */ +static int +ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo) +{ + u8 i; + + /* Write TS index to read to the PF register so the FW can read it */ + wr32(hw, PF_SB_ATQBAL, TS_LL_READ_TS_IDX(idx)); + + /* Read the register repeatedly until the FW provides us the TS */ + for (i = TS_LL_READ_RETRIES; i > 0; i--) { + u32 val = rd32(hw, PF_SB_ATQBAL); + + /* When the bit is cleared, the TS is ready in the register */ + if (!(val & TS_LL_READ_TS)) { + /* High 8 bit value of the TS is on the bits 16:23 */ + *hi = (u8)(val >> TS_LL_READ_TS_HIGH_S); + + /* Read the low 32 bit value and set the TS valid bit */ + *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID; + return 0; + } + + udelay(10); + } + + /* FW failed to provide the TS in time */ + ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n"); + return -EBUSY; +} + +/** + * ice_read_phy_tstamp_sbq_e810 - Read a PHY timestamp registers through the sbq + * @hw: pointer to the HW struct + * @lport: the lport to read from + * @idx: the timestamp index to read + * @hi: 8 bit timestamp high value + * @lo: 32 bit timestamp low value + * + * Read a 8bit timestamp high value and 32 bit timestamp low value out of the + * timestamp block of the external PHY on the E810 device using sideband queue. + */ +static int +ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi, + u32 *lo) +{ + u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx); + u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx); + u32 lo_val, hi_val; + int status; + + status = ice_read_phy_reg_e810(hw, lo_addr, &lo_val); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n", + status); + return status; + } + + status = ice_read_phy_reg_e810(hw, hi_addr, &hi_val); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n", + status); + return status; + } + + *lo = lo_val; + *hi = (u8)hi_val; + + return 0; +} + /** * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY * @hw: pointer to the HW struct @@ -1106,28 +2913,20 @@ ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val) * Read a 40bit timestamp value out of the timestamp block of the external PHY * on the E810 device. 
*/ -static enum ice_status +static int ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp) { - enum ice_status status; - u32 lo_addr, hi_addr, lo, hi; + int status; + u32 lo = 0; + u8 hi = 0; - lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx); - hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx); + if (hw->dev_caps.ts_dev_info.ts_ll_read) + status = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo); + else + status = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo); - status = ice_read_phy_reg_e810(hw, lo_addr, &lo); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n", - status); + if (status) return status; - } - - status = ice_read_phy_reg_e810(hw, hi_addr, &hi); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n", - status); - return status; - } /* For E810 devices, the timestamp is reported with the lower 32 bits * in the low register, and the upper 8 bits in the high register. @@ -1143,29 +2942,40 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp) * @lport: the lport to read from * @idx: the timestamp index to reset * - * Clear a timestamp, resetting its valid bit, from the timestamp block of the - * external PHY on the E810 device. + * Read the timestamp and then forcibly overwrite its value to clear the valid + * bit from the timestamp block of the external PHY on the E810 device. + * + * This function should only be called on an idx whose bit is set according to + * ice_get_phy_tx_tstamp_ready. */ -static enum ice_status +static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx) { - enum ice_status status; u32 lo_addr, hi_addr; + u64 unused_tstamp; + int status; + + status = ice_read_phy_tstamp_e810(hw, lport, idx, &unused_tstamp); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for lport %u, idx %u, status %d\n", + lport, idx, status); + return status; + } lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx); hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx); status = ice_write_phy_reg_e810(hw, lo_addr, 0); if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n", - status); + ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for lport %u, idx %u, status %d\n", + lport, idx, status); return status; } status = ice_write_phy_reg_e810(hw, hi_addr, 0); if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n", - status); + ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register for lport %u, idx %u, status %d\n", + lport, idx, status); return status; } @@ -1181,9 +2991,9 @@ ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx) * * Note there is no equivalent function needed on E822 based devices. */ -enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw) +int ice_ptp_init_phy_e810(struct ice_hw *hw) { - enum ice_status status; + int status; u8 tmr_idx; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; @@ -1196,6 +3006,21 @@ enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw) return status; } +/** + * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization + * @hw: pointer to HW struct + * + * Perform E810-specific PTP hardware clock initialization steps. 
+ */ +static int ice_ptp_init_phc_e810(struct ice_hw *hw) +{ + /* Ensure synchronization delay is zero */ + wr32(hw, GLTSYN_SYNC_DLAY, 0); + + /* Initialize the PHY */ + return ice_ptp_init_phy_e810(hw); +} + /** * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time * @hw: Board private structure @@ -1203,14 +3028,14 @@ enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw) * * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation setting the * initial clock time. The time will not actually be programmed until the - * driver issues an INIT_TIME command. + * driver issues an ICE_PTP_INIT_TIME command. * * The time value is the upper 32 bits of the PHY timer, usually in units of * nominal nanoseconds. */ -static enum ice_status ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time) +static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time) { - enum ice_status status; + int status; u8 tmr_idx; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; @@ -1239,16 +3064,16 @@ static enum ice_status ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time) * * Prepare the PHY port for an atomic adjustment by programming the PHY * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment - * is completed by issuing an ADJ_TIME sync command. + * is completed by issuing an ICE_PTP_ADJ_TIME sync command. * * The adjustment value only contains the portion used for the upper 32bits of * the PHY timer, usually in units of nominal nanoseconds. Negative * adjustments are supported using 2s complement arithmetic. */ -static enum ice_status +static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj, bool lock_sbq) { - enum ice_status status; + int status; u8 tmr_idx; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; @@ -1282,13 +3107,13 @@ ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj, bool lock_sbq) * * Prepare the PHY port for a new increment value by programming the PHY * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is - * completed by issuing an INIT_INCVAL command. + * completed by issuing an ICE_PTP_INIT_INCVAL command. */ -static enum ice_status +static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) { - enum ice_status status; u32 high, low; + int status; u8 tmr_idx; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; @@ -1320,16 +3145,16 @@ ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for * a target time adjust, which will trigger an adjustment of the clock in the * future. The actual adjustment will occur the next time the PHY port timer - * crosses over the provided value after the driver issues an ADJ_TIME_AT_TIME - * command. + * crosses over the provided value after the driver issues an + * ICE_PTP_ADJ_TIME_AT_TIME command. * * The time value is the upper 32 bits of the PHY timer, usually in units of * nominal nanoseconds. */ -static enum ice_status +static int ice_ptp_prep_phy_adj_target_e810(struct ice_hw *hw, u32 target_time) { - enum ice_status status; + int status; u8 tmr_idx; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; @@ -1360,32 +3185,32 @@ ice_ptp_prep_phy_adj_target_e810(struct ice_hw *hw, u32 target_time) * Prepare the external PHYs connected to this device for a timer sync * command. 
*/ -static enum ice_status +static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd, bool lock_sbq) { - enum ice_status status; u32 cmd_val, val; + int status; switch (cmd) { - case INIT_TIME: + case ICE_PTP_INIT_TIME: cmd_val = GLTSYN_CMD_INIT_TIME; break; - case INIT_INCVAL: + case ICE_PTP_INIT_INCVAL: cmd_val = GLTSYN_CMD_INIT_INCVAL; break; - case ADJ_TIME: + case ICE_PTP_ADJ_TIME: cmd_val = GLTSYN_CMD_ADJ_TIME; break; - case ADJ_TIME_AT_TIME: + case ICE_PTP_ADJ_TIME_AT_TIME: cmd_val = GLTSYN_CMD_ADJ_INIT_TIME; break; - case READ_TIME: + case ICE_PTP_READ_TIME: cmd_val = GLTSYN_CMD_READ_TIME; break; default: dev_warn(ice_hw_to_dev(hw), "Unknown timer command %u\n", cmd); - return ICE_ERR_PARAM; + return -EINVAL; } /* Read, modify, write */ @@ -1410,10 +3235,516 @@ ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd, return 0; } +/* ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register + * @hw: pointer to the HW struct + * @port: the PHY port to read + * @tstamp_ready: contents of the Tx memory status register + * + * E810 devices do not use a Tx memory status register. Instead simply + * indicate that all timestamps are currently ready. + */ +static int +ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready) +{ + *tstamp_ready = 0xFFFFFFFFFFFFFFFF; + return 0; +} + +/* E810T SMA functions + * + * The following functions operate specifically on E810T hardware and are used + * to access the extended GPIOs available. + */ + +/** + * ice_get_pca9575_handle + * @hw: pointer to the hw struct + * @pca9575_handle: GPIO controller's handle + * + * Find and return the GPIO controller's handle in the netlist. + * When found - the value will be cached in the hw structure and following calls + * will return cached value + */ +static int +ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) +{ + struct ice_aqc_get_link_topo cmd; + u8 node_part_number, idx; + u16 node_handle; + int status; + + if (!hw || !pca9575_handle) + return -EINVAL; + + /* If handle was read previously return cached value */ + if (hw->io_expander_handle) { + *pca9575_handle = hw->io_expander_handle; + return 0; + } + + memset(&cmd, 0, sizeof(cmd)); + + /* Set node type to GPIO controller */ + cmd.addr.topo_params.node_type_ctx = + (ICE_AQC_LINK_TOPO_NODE_TYPE_M & + ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL); + +#define SW_PCA9575_SFP_TOPO_IDX 2 +#define SW_PCA9575_QSFP_TOPO_IDX 1 + + /* Check if the SW IO expander controlling SMA exists in the netlist. 
*/ + if (hw->device_id == ICE_DEV_ID_E810C_SFP) + idx = SW_PCA9575_SFP_TOPO_IDX; + else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) + idx = SW_PCA9575_QSFP_TOPO_IDX; + else + return -EOPNOTSUPP; + + cmd.addr.topo_params.index = idx; + + status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, + &node_handle); + if (status) + return -EOPNOTSUPP; + + /* Verify if we found the right IO expander type */ + if (node_part_number != ICE_ACQ_GET_LINK_TOPO_NODE_NR_PCA9575) + return -EOPNOTSUPP; + + /* If present save the handle and return it */ + hw->io_expander_handle = node_handle; + *pca9575_handle = hw->io_expander_handle; + + return 0; +} + +/** + * ice_is_phy_rclk_present + * @hw: pointer to the hw struct + * + * Check if the PHY Recovered Clock device is present in the netlist + */ +bool ice_is_phy_rclk_present(struct ice_hw *hw) +{ + if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_ACQ_GET_LINK_TOPO_NODE_NR_C827, NULL) && + ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_ACQ_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) + return false; + + return true; +} + +/** + * ice_is_clock_mux_present_e810t + * @hw: pointer to the hw struct + * + * Check if the Clock Multiplexer device is present in the netlist + */ +bool ice_is_clock_mux_present_e810t(struct ice_hw *hw) +{ + if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, + ICE_ACQ_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, + NULL)) + return false; + + return true; +} + +/** + * ice_get_pf_c827_idx - find and return the C827 index for the current pf + * @hw: pointer to the hw struct + * @idx: index of the found C827 PHY + */ +int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) +{ + struct ice_aqc_get_link_topo cmd; + u8 node_part_number; + u16 node_handle; + int status; + u8 ctx; + + if (hw->mac_type != ICE_MAC_E810) + return -ENODEV; + + if (hw->device_id != ICE_DEV_ID_E810C_QSFP) { + *idx = C827_0; + return 0; + } + + memset(&cmd, 0, sizeof(cmd)); + + ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_PHY << ICE_AQC_LINK_TOPO_NODE_TYPE_S; + ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S; + cmd.addr.topo_params.node_type_ctx = ctx; + cmd.addr.topo_params.index = 0; + + status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, + &node_handle); + if (status || node_part_number != ICE_ACQ_GET_LINK_TOPO_NODE_NR_C827) + return -ENOENT; + + if (node_handle == E810C_QSFP_C827_0_HANDLE) + *idx = C827_0; + else if (node_handle == E810C_QSFP_C827_1_HANDLE) + *idx = C827_1; + else + return -EIO; + + return 0; +} + +/** + * ice_is_gps_present_e810t + * @hw: pointer to the hw struct + * + * Check if the GPS generic device is present in the netlist + */ +bool ice_is_gps_present_e810t(struct ice_hw *hw) +{ + if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, + ICE_ACQ_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) + return false; + + return true; +} + +/** + * ice_read_pca9575_reg_e810t + * @hw: pointer to the hw struct + * @offset: GPIO controller register offset + * @data: pointer to data to be read from the GPIO controller + * + * Read the register from the GPIO controller + */ +int +ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) +{ + struct ice_aqc_link_topo_addr link_topo; + __le16 addr; + int status; + u16 handle; + + memset(&link_topo, 0, sizeof(link_topo)); + + status = ice_get_pca9575_handle(hw, &handle); + if (status) + return status; + + link_topo.handle = cpu_to_le16(handle); + link_topo.topo_params.node_type_ctx = + (ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED << + 
ICE_AQC_LINK_TOPO_NODE_CTX_S); + + addr = cpu_to_le16((u16)offset); + + return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); +} + +/** + * ice_write_pca9575_reg_e810t + * @hw: pointer to the hw struct + * @offset: GPIO controller register offset + * @data: data to be written to the GPIO controller + * + * Write the data to the GPIO controller register + */ +int +ice_write_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 data) +{ + struct ice_aqc_link_topo_addr link_topo; + __le16 addr; + int status; + u16 handle; + + memset(&link_topo, 0, sizeof(link_topo)); + + status = ice_get_pca9575_handle(hw, &handle); + if (status) + return status; + + link_topo.handle = cpu_to_le16(handle); + link_topo.topo_params.node_type_ctx = + (ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED << + ICE_AQC_LINK_TOPO_NODE_CTX_S); + + addr = cpu_to_le16((u16)offset); + + return ice_aq_write_i2c(hw, link_topo, 0, addr, 1, &data, NULL); +} + +/** + * ice_read_sma_ctrl_e810t + * @hw: pointer to the hw struct + * @data: pointer to data to be read from the GPIO controller + * + * Read the SMA controller state. Only bits 3-7 in data are valid. + */ +int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) +{ + int status; + u16 handle; + u8 i; + + status = ice_get_pca9575_handle(hw, &handle); + if (status) + return status; + + *data = 0; + + for (i = ICE_E810T_SMA_MIN_BIT; i <= ICE_E810T_SMA_MAX_BIT; i++) { + bool pin; + + status = ice_aq_get_gpio(hw, handle, i + ICE_E810T_P1_OFFSET, + &pin, NULL); + if (status) + break; + *data |= (u8)(!pin) << i; + } + + return status; +} + +/** + * ice_write_sma_ctrl_e810t + * @hw: pointer to the hw struct + * @data: data to be written to the GPIO controller + * + * Write the data to the SMA controller. Only bits 3-7 in data are valid. + */ +int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) +{ + int status; + u16 handle; + u8 i; + + status = ice_get_pca9575_handle(hw, &handle); + if (status) + return status; + + for (i = ICE_E810T_SMA_MIN_BIT; i <= ICE_E810T_SMA_MAX_BIT; i++) { + bool pin; + + pin = !(data & (1 << i)); + status = ice_aq_set_gpio(hw, handle, i + ICE_E810T_P1_OFFSET, + pin, NULL); + if (status) + break; + } + + return status; +} + +/** + * ice_is_pca9575_present + * @hw: pointer to the hw struct + * + * Check if the SW IO expander is present in the netlist + */ +bool ice_is_pca9575_present(struct ice_hw *hw) +{ + int status; + u16 handle = 0; + + status = ice_get_pca9575_handle(hw, &handle); + if (!status && handle) + return true; + + return false; +} + +/** + * ice_is_cgu_present + * @hw: pointer to the hw struct + * + * Check if the Clock Generation Unit (CGU) device is present in the netlist + */ +bool ice_is_cgu_present(struct ice_hw *hw) +{ + if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_ACQ_GET_LINK_TOPO_NODE_NR_ZL30632_80032, + NULL)) { + hw->cgu_part_number = + ICE_ACQ_GET_LINK_TOPO_NODE_NR_ZL30632_80032; + return true; + } else if (!ice_find_netlist_node(hw, + ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_ACQ_GET_LINK_TOPO_NODE_NR_SI5383_5384, + NULL)) { + hw->cgu_part_number = ICE_ACQ_GET_LINK_TOPO_NODE_NR_SI5383_5384; + return true; + } + + return false; +} + +/** + * ice_cgu_state_to_name - get the name of CGU state + * @state: state of the CGU + * + * Return: name of CGU state + */ +const char *ice_cgu_state_to_name(int state) +{ + switch (state) { + case ICE_CGU_STATE_INVALID: + return "invalid"; + case ICE_CGU_STATE_FREERUN: + return "freerun"; + case ICE_CGU_STATE_LOCKED: + return "locked"; + case 
ICE_CGU_STATE_LOCKED_HO_ACQ: + return "locked_ho_acq"; + case ICE_CGU_STATE_HOLDOVER: + return "holdover"; + case ICE_CGU_STATE_UNKNOWN: + default: + return "unknown"; + } +} + +/** + * ice_get_cgu_state - get the state of the DPLL + * @hw: pointer to the hw struct + * @dpll_idx: Index of internal DPLL unit + * @pin: pointer to a buffer for returning currently active pin + * @phase_offset: pointer to a buffer for returning phase offset + * @last_dpll_state: last known state of DPLL + * + * This function will read the state of the DPLL(dpll_idx). Non-null + * 'pin' and 'phase_offset' parameters are used to retrieve currently + * active pin and phase_offset respectively. + * + * Return: state of the DPLL + */ +enum ice_cgu_state +ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx, u8 *pin, s64 *phase_offset, + enum ice_cgu_state last_dpll_state) +{ + u16 dpll_state; + s64 ph_offset; + u8 ref_state; + u8 eec_mode; + int status; + + if (dpll_idx >= ICE_CGU_DPLL_MAX) + return ICE_CGU_STATE_INVALID; + + status = ice_aq_get_cgu_dpll_status(hw, dpll_idx, &ref_state, + &dpll_state, &ph_offset, + &eec_mode); + if (status) + return ICE_CGU_STATE_INVALID; + + if (pin) { + /* current ref pin in dpll_state_refsel_status_X register */ + *pin = (dpll_state & + ICE_AQC_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SEL) >> + ICE_AQC_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT; + } + + if (phase_offset) + *phase_offset = ph_offset; + + if (dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK) { + if (dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY) + return ICE_CGU_STATE_LOCKED_HO_ACQ; + else + return ICE_CGU_STATE_LOCKED; + } + + /* According to the ZL DPLL documentation, once the DPLL reaches + * LOCKED_HO_ACQ it never returns to FREERUN; this matches the ITU-T + * G.781 Recommendation. However, HOLDOVER cannot be reported once the + * holdover memory has been cleared, which happens when switching to + * another reference (LOCKED without HO_ACQ) or when the holdover times + * out (not implemented yet) - only in those two situations do we + * actually fall back to FREERUN.
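The last_dpll_state argument lets a caller implement the holdover rule described above: feed the previous reading back in, and FREERUN is only reported when holdover is genuinely impossible. A hedged monitoring sketch built on the helpers added in this patch (the choice of DPLL index and the logging are illustrative):

	/* Illustrative only: one pass of a periodic DPLL monitor. */
	static void ice_dpll_poll_once_sketch(struct ice_hw *hw,
					      enum ice_cgu_state *last_state)
	{
		enum ice_cgu_state state;
		s64 phase_offset;
		u8 pin;

		state = ice_get_cgu_state(hw, ICE_CGU_DPLL_PTP, &pin,
					  &phase_offset, *last_state);

		if (state != *last_state)
			dev_info(ice_hw_to_dev(hw),
				 "PTP DPLL moved to %s (pin %u, phase offset %lld)\n",
				 ice_cgu_state_to_name(state), pin,
				 (long long)phase_offset);

		*last_state = state;
	}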
+ */ + if (last_dpll_state == ICE_CGU_STATE_LOCKED_HO_ACQ || + last_dpll_state == ICE_CGU_STATE_HOLDOVER) + return ICE_CGU_STATE_HOLDOVER; + + return ICE_CGU_STATE_FREERUN; +} + +static const struct ice_cgu_pin_desc ice_e810t_cgu_inputs[] = { + /* name idx */ + { "CVL-SDP22", ZL_REF0P }, + { "CVL-SDP20", ZL_REF0N }, + { "C827_0-RCLKA", ZL_REF1P }, + { "C827_0-RCLKB", ZL_REF1N }, + { "C827_1-RCLKA", ZL_REF2P }, + { "C827_1-RCLKB", ZL_REF2N }, + { "SMA1", ZL_REF3P }, + { "SMA2/U.FL2", ZL_REF3N }, + { "GNSS-1PPS", ZL_REF4P }, + { "OCXO", ZL_REF4N }, +}; + +/** + * ice_zl_pin_idx_to_name_e810t - get the name of E810T CGU pin + * @pin: pin number + * + * Return: name of E810T CGU pin + */ +const char *ice_zl_pin_idx_to_name_e810t(u8 pin) +{ + if (pin < NUM_ZL_CGU_PINS) + return ice_e810t_cgu_inputs[pin].name; + + return "invalid"; +} +static const struct ice_cgu_pin_desc ice_e823_si_cgu_inputs[] = { + /* name idx */ + { "NONE", SI_REF0P }, + { "NONE", SI_REF0N }, + { "SYNCE0_DP", SI_REF1P }, + { "SYNCE0_DN", SI_REF1N }, + { "EXT_CLK_SYNC", SI_REF2P }, + { "NONE", SI_REF2N }, + { "EXT_PPS_OUT", SI_REF3 }, + { "INT_PPS_OUT", SI_REF4 }, +}; + +static const struct ice_cgu_pin_desc ice_e823_zl_cgu_inputs[] = { + /* name idx */ + { "NONE", ZL_REF0P }, + { "INT_PPS_OUT", ZL_REF0N }, + { "SYNCE0_DP", ZL_REF1P }, + { "SYNCE0_DN", ZL_REF1N }, + { "NONE", ZL_REF2P }, + { "NONE", ZL_REF2N }, + { "EXT_CLK_SYNC", ZL_REF3P }, + { "NONE", ZL_REF3N }, + { "EXT_PPS_OUT", ZL_REF4P }, + { "OCXO", ZL_REF4N }, +}; + +/** + * ice_pin_idx_to_name_e823 - get the name of E823 CGU pin + * @hw: pointer to the hw struct + * @pin: pin number + * + * Return: name of E823 CGU pin + */ +const char *ice_pin_idx_to_name_e823(struct ice_hw *hw, u8 pin) +{ + if (hw->cgu_part_number == + ICE_ACQ_GET_LINK_TOPO_NODE_NR_ZL30632_80032 && + pin < NUM_ZL_CGU_PINS) + return ice_e823_zl_cgu_inputs[pin].name; + else if (hw->cgu_part_number == + ICE_ACQ_GET_LINK_TOPO_NODE_NR_SI5383_5384 && + pin < NUM_SI_CGU_PINS) + return ice_e823_si_cgu_inputs[pin].name; + else + return "invalid"; +} + /* Device agnostic functions * - * The following functions implement shared behavior common to both E822 and - * E810 devices, possibly calling a device specific implementation where + * The following functions implement shared behavior common to both E822/E823 + * and E810 devices, possibly calling a device specific implementation where * necessary. */ @@ -1437,18 +3768,16 @@ bool ice_ptp_lock(struct ice_hw *hw) u32 hw_lock; int i; -#define MAX_TRIES 5 +#define MAX_TRIES 15 for (i = 0; i < MAX_TRIES; i++) { hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); hw_lock = hw_lock & PFTSYN_SEM_BUSY_M; - if (hw_lock) { - /* Somebody is holding the lock */ - msleep(10); - continue; - } else { + if (!hw_lock) break; - } + + /* Somebody is holding the lock */ + usleep_range(5000, 6000); } return !hw_lock; @@ -1466,45 +3795,6 @@ void ice_ptp_unlock(struct ice_hw *hw) wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0); } -/** - * ice_ptp_src_cmd - Prepare source timer for a timer command - * @hw: pointer to HW structure - * @cmd: Timer command - * - * Prepare the source timer for an upcoming timer sync command. 
- */ -void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) -{ - u32 cmd_val; - u8 tmr_idx; - - tmr_idx = ice_get_ptp_src_clock_index(hw); - cmd_val = tmr_idx << SEL_CPK_SRC; - - switch (cmd) { - case INIT_TIME: - cmd_val |= GLTSYN_CMD_INIT_TIME; - break; - case INIT_INCVAL: - cmd_val |= GLTSYN_CMD_INIT_INCVAL; - break; - case ADJ_TIME: - cmd_val |= GLTSYN_CMD_ADJ_TIME; - break; - case ADJ_TIME_AT_TIME: - cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME; - break; - case READ_TIME: - cmd_val |= GLTSYN_CMD_READ_TIME; - break; - default: - dev_warn(ice_hw_to_dev(hw), "Unknown timer command %u\n", cmd); - return; - } - - wr32(hw, GLTSYN_CMD, cmd_val); -} - /** * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command * @hw: pointer to HW struct @@ -1516,19 +3806,25 @@ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) * for the command to be synchronously applied to both the source and PHY * timers. */ -static enum ice_status +static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd, bool lock_sbq) { - enum ice_status status; + int status; /* First, prepare the source timer */ ice_ptp_src_cmd(hw, cmd); /* Next, prepare the ports */ - if (ice_is_e810(hw)) + switch (hw->phy_cfg) { + case ICE_PHY_E810: status = ice_ptp_port_cmd_e810(hw, cmd, lock_sbq); - else + break; + case ICE_PHY_E822: status = ice_ptp_port_cmd_e822(hw, cmd, lock_sbq); + break; + default: + status = -EOPNOTSUPP; + } if (status) { ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, status %d\n", cmd, status); @@ -1538,7 +3834,8 @@ ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd, bool lock_sbq) /* Write the sync command register to drive both source and PHY timer * commands synchronously */ - wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); + ice_ptp_exec_tmr_cmd(hw); + ice_ptp_clean_cmd(hw); return 0; } @@ -1556,9 +3853,9 @@ ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd, bool lock_sbq) * 3) issue an init_time timer command to synchronously switch both the source * and port timers to the new init time value at the next clock cycle. */ -enum ice_status ice_ptp_init_time(struct ice_hw *hw, u64 time) +int ice_ptp_init_time(struct ice_hw *hw, u64 time) { - enum ice_status status; + int status; u8 tmr_idx; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; @@ -1570,14 +3867,21 @@ enum ice_status ice_ptp_init_time(struct ice_hw *hw, u64 time) /* PHY Clks */ /* Fill Rx and Tx ports and send msg to PHY */ - if (ice_is_e810(hw)) + switch (hw->phy_cfg) { + case ICE_PHY_E810: status = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); - else + break; + case ICE_PHY_E822: status = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF); + break; + default: + status = -EOPNOTSUPP; + } + if (status) return status; - return ice_ptp_tmr_cmd(hw, INIT_TIME, true); + return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_TIME, true); } /** @@ -1590,13 +3894,13 @@ enum ice_status ice_ptp_init_time(struct ice_hw *hw, u64 time) * * 1) Write the increment value to the source timer shadow registers * 2) Write the increment value to the PHY timer shadow registers - * 3) Issue an INIT_INCVAL timer command to synchronously switch both the - * source and port timers to the new increment value at the next clock + * 3) Issue an ICE_PTP_INIT_INCVAL timer command to synchronously switch both + * the source and port timers to the new increment value at the next clock * cycle. 
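A frequency correction in this scheme amounts to rewriting the increment value: scale the nominal incval and hand the result to ice_ptp_write_incval_locked(), which takes the PTP semaphore and runs the three steps above. A rough sketch, with base_incval standing in for the device's nominal increment (the exact constant and the driver's real adjustment path are not reproduced here):

	/* Illustrative only: apply a parts-per-billion frequency correction. */
	static int ice_ptp_adjfreq_sketch(struct ice_hw *hw, u64 base_incval, s32 ppb)
	{
		u64 incval = base_incval;
		u64 delta;

		/* Scale the nominal increment by |ppb| / 1e9 */
		delta = div_u64(base_incval * (u64)abs(ppb), 1000000000);

		if (ppb < 0)
			incval -= delta;
		else
			incval += delta;

		/* Writes the shadow registers and issues ICE_PTP_INIT_INCVAL */
		return ice_ptp_write_incval_locked(hw, incval);
	}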
*/ -enum ice_status ice_ptp_write_incval(struct ice_hw *hw, u64 incval) +int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) { - enum ice_status status; + int status; u8 tmr_idx; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; @@ -1605,14 +3909,21 @@ enum ice_status ice_ptp_write_incval(struct ice_hw *hw, u64 incval) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); - if (ice_is_e810(hw)) + switch (hw->phy_cfg) { + case ICE_PHY_E810: status = ice_ptp_prep_phy_incval_e810(hw, incval); - else + break; + case ICE_PHY_E822: status = ice_ptp_prep_phy_incval_e822(hw, incval); + break; + default: + status = -EOPNOTSUPP; + } + if (status) return status; - return ice_ptp_tmr_cmd(hw, INIT_INCVAL, true); + return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_INCVAL, true); } /** @@ -1622,12 +3933,12 @@ enum ice_status ice_ptp_write_incval(struct ice_hw *hw, u64 incval) * * Program a new PHC incval while holding the PTP semaphore. */ -enum ice_status ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval) +int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval) { - enum ice_status status; + int status; if (!ice_ptp_lock(hw)) - return ICE_ERR_NOT_READY; + return -EBUSY; status = ice_ptp_write_incval(hw, incval); @@ -1648,32 +3959,39 @@ enum ice_status ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval) * * 1) Write the adjustment to the source timer shadow registers * 2) Write the adjustment to the PHY timer shadow registers - * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to - * both the source and port timers at the next clock cycle. + * 3) Issue an ICE_PTP_ADJ_TIME timer command to synchronously apply the + * adjustment to both the source and port timers at the next clock cycle. */ -enum ice_status ice_ptp_adj_clock(struct ice_hw *hw, s32 adj, bool lock_sbq) +int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj, bool lock_sbq) { - enum ice_status status; + int status; u8 tmr_idx; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* Write the desired clock adjustment into the GLTSYN_SHADJ register. - * For an ADJ_TIME command, this set of registers represents the value - * to add to the clock time. It supports subtraction by interpreting - * the value as a 2's complement integer. + * For an ICE_PTP_ADJ_TIME command, this set of registers represents + * the value to add to the clock time. It supports subtraction by + * interpreting the value as a 2's complement integer. */ wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj); - if (ice_is_e810(hw)) + switch (hw->phy_cfg) { + case ICE_PHY_E810: status = ice_ptp_prep_phy_adj_e810(hw, adj, lock_sbq); - else + break; + case ICE_PHY_E822: status = ice_ptp_prep_phy_adj_e822(hw, adj, lock_sbq); + break; + default: + status = -EOPNOTSUPP; + } + if (status) return status; - return ice_ptp_tmr_cmd(hw, ADJ_TIME, lock_sbq); + return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME, lock_sbq); } /** @@ -1689,13 +4007,14 @@ enum ice_status ice_ptp_adj_clock(struct ice_hw *hw, s32 adj, bool lock_sbq) * 2) Write the target time to the source timer shadow time registers * 3) Write the adjustment to the PHY timers shadow adjust registers * 4) Write the target time to the PHY timers shadow adjust registers - * 5) Issue an ADJ_TIME_AT_TIME command to initiate the atomic adjustment. + * 5) Issue an ICE_PTP_ADJ_TIME_AT_TIME command to initiate the atomic + * adjustment. 
*/ -enum ice_status +int ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj) { - enum ice_status status; u32 time_lo, time_hi; + int status; u8 tmr_idx; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; @@ -1703,9 +4022,9 @@ ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj) time_hi = upper_32_bits(at_time); /* Write the desired clock adjustment into the GLTSYN_SHADJ register. - * For an ADJ_TIME_AT_TIME command, this set of registers represents - * the value to add to the clock time. It supports subtraction by - * interpreting the value as a 2's complement integer. + * For an ICE_PTP_ADJ_TIME_AT_TIME command, this set of registers + * represents the value to add to the clock time. It supports + * subtraction by interpreting the value as a 2's complement integer. */ wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj); @@ -1716,28 +4035,42 @@ ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj) wr32(hw, GLTSYN_SHTIME_H(tmr_idx), time_hi); /* Prepare PHY port adjustments */ - if (ice_is_e810(hw)) + switch (hw->phy_cfg) { + case ICE_PHY_E810: status = ice_ptp_prep_phy_adj_e810(hw, adj, true); - else + break; + case ICE_PHY_E822: status = ice_ptp_prep_phy_adj_e822(hw, adj, true); + break; + default: + status = -EOPNOTSUPP; + } + if (status) return status; /* Set target time for each PHY port */ - if (ice_is_e810(hw)) + switch (hw->phy_cfg) { + case ICE_PHY_E810: status = ice_ptp_prep_phy_adj_target_e810(hw, time_lo); - else + break; + case ICE_PHY_E822: status = ice_ptp_prep_phy_adj_target_e822(hw, time_lo); + break; + default: + status = -EOPNOTSUPP; + } + if (status) return status; - return ice_ptp_tmr_cmd(hw, ADJ_TIME_AT_TIME, true); + return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME_AT_TIME, true); } /** - * ice_read_phy_tstamp - Read a PHY timestamp from the timestamo block + * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block * @hw: pointer to the HW struct - * @block: the block to read from + * @block: the block/port to read from * @idx: the timestamp index to read * @tstamp: on return, the 40bit timestamp value * @@ -1745,30 +4078,133 @@ ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj) * the block is the quad to read from. For E810 devices, the block is the * logical port to read from. */ -enum ice_status +int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) { - if (ice_is_e810(hw)) - return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); - else - return ice_read_phy_tstamp_e822(hw, block, idx, tstamp); + int status; + + switch (hw->phy_cfg) { + case ICE_PHY_E810: + status = ice_read_phy_tstamp_e810(hw, block, idx, tstamp); + break; + case ICE_PHY_E822: + status = ice_read_phy_tstamp_e822(hw, block, idx, tstamp); + break; + default: + status = -EOPNOTSUPP; + } + + return status; } /** - * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block + * ice_clear_phy_tstamp - Drop a timestamp from the timestamp block * @hw: pointer to the HW struct * @block: the block to read from * @idx: the timestamp index to reset * - * Clear a timestamp, resetting its valid bit, from the timestamp block. For - * E822 devices, the block is the quad to clear from. For E810 devices, the - * block is the logical port to clear from. + * Drop a timestamp from the timestamp block by reading it. This will reset + * the memory status bit allowing the timestamp index to be reused. For E822 + * devices, the block is the quad to clear from. 
For E810 devices, the block + * is the logical port to clear from. + * + * This function should only be called on a timestamp index whose valid bit + * is set according to ice_get_phy_tx_tstamp_ready. */ -enum ice_status +int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) { - if (ice_is_e810(hw)) - return ice_clear_phy_tstamp_e810(hw, block, idx); - else - return ice_clear_phy_tstamp_e822(hw, block, idx); + int status; + + switch (hw->phy_cfg) { + case ICE_PHY_E810: + status = ice_clear_phy_tstamp_e810(hw, block, idx); + break; + case ICE_PHY_E822: + status = ice_clear_phy_tstamp_e822(hw, block, idx); + break; + default: + status = -EOPNOTSUPP; + } + + return status; } + +/** + * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks + * @hw: pointer to the HW struct + */ +void ice_ptp_reset_ts_memory(struct ice_hw *hw) +{ + switch (hw->phy_cfg) { + case ICE_PHY_E822: + ice_ptp_reset_ts_memory_e822(hw); + break; + case ICE_PHY_E810: + default: + return; + } +} + +/** + * ice_ptp_init_phc - Initialize PTP hardware clock + * @hw: pointer to the HW struct + * + * Perform the steps required to initialize the PTP hardware clock. + */ +int ice_ptp_init_phc(struct ice_hw *hw) +{ + u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned; + int status; + + /* Enable source clocks */ + wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M); + + /* Clear event status indications for auxiliary pins */ + (void)rd32(hw, GLTSYN_STAT(src_idx)); + + switch (hw->phy_cfg) { + case ICE_PHY_E810: + status = ice_ptp_init_phc_e810(hw); + break; + case ICE_PHY_E822: + status = ice_ptp_init_phc_e822(hw); + break; + default: + status = -EOPNOTSUPP; + } + + return status; +} + +/* ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication + * @hw: pointer to the HW struct + * @block: the timestamp block to check + * @tstamp_ready: storage for the PHY Tx memory status information + * + * Check the PHY for Tx timestamp memory status. This reports a 64 bit value + * which indicates which timestamps in the block may be captured. A set bit + * means the timestamp can be read. An unset bit means the timestamp is not + * ready and software should avoid reading the register. 
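Together with ice_read_phy_tstamp() and ice_clear_phy_tstamp(), the function below supports the expected Tx timestamp consumption pattern: read the ready bitmap, read only the indexes whose bit is set, then clear each consumed slot. A condensed, illustrative sketch of that loop (delivery to the stack is omitted):

	/* Illustrative only: drain every ready Tx timestamp from one block. */
	static void ice_ptp_drain_tx_tstamps_sketch(struct ice_hw *hw, u8 block)
	{
		u64 tstamp_ready, tstamp;
		u8 idx;

		if (ice_get_phy_tx_tstamp_ready(hw, block, &tstamp_ready))
			return;

		for (idx = 0; idx < 64; idx++) {
			if (!(tstamp_ready & BIT_ULL(idx)))
				continue;

			if (ice_read_phy_tstamp(hw, block, idx, &tstamp))
				continue;

			/* hand 'tstamp' to the stack here (omitted) */

			/* Reading and clearing frees the index for reuse */
			ice_clear_phy_tstamp(hw, block, idx);
		}
	}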
+ */ +int +ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) +{ + int status; + + switch (hw->phy_cfg) { + case ICE_PHY_E810: + status = ice_get_phy_tx_tstamp_ready_e810(hw, block, + tstamp_ready); + break; + case ICE_PHY_E822: + status = ice_get_phy_tx_tstamp_ready_e822(hw, block, + tstamp_ready); + break; + default: + status = -EOPNOTSUPP; + } + + return status; +} + diff --git a/drivers/thirdparty/ice/ice_ptp_hw.h b/drivers/thirdparty/ice/ice_ptp_hw.h index e63b9ca75260..adbbad4ff003 100644 --- a/drivers/thirdparty/ice/ice_ptp_hw.h +++ b/drivers/thirdparty/ice/ice_ptp_hw.h @@ -5,11 +5,12 @@ #define _ICE_PTP_HW_H_ enum ice_ptp_tmr_cmd { - INIT_TIME, - INIT_INCVAL, - ADJ_TIME, - ADJ_TIME_AT_TIME, - READ_TIME + ICE_PTP_INIT_TIME, + ICE_PTP_INIT_INCVAL, + ICE_PTP_ADJ_TIME, + ICE_PTP_ADJ_TIME_AT_TIME, + ICE_PTP_READ_TIME, + ICE_PTP_NOP, }; enum ice_ptp_serdes { @@ -54,9 +55,135 @@ struct ice_time_ref_info_e822 { u8 pps_delay; }; +/** + * struct ice_vernier_info_e822 + * @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS + * @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS + * @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS + * @rx_pcs_clk: Frequency used to calculate P_REG_PCS_RX_TUS + * @tx_desk_rsgb_par: Frequency used to calculate P_REG_DESK_PAR_TX_TUS + * @rx_desk_rsgb_par: Frequency used to calculate P_REG_DESK_PAR_RX_TUS + * @tx_desk_rsgb_pcs: Frequency used to calculate P_REG_DESK_PCS_TX_TUS + * @rx_desk_rsgb_pcs: Frequency used to calculate P_REG_DESK_PCS_RX_TUS + * @tx_fixed_delay: Fixed Tx latency measured in 1/100th nanoseconds + * @pmd_adj_divisor: Divisor used to calculate PDM alignment adjustment + * @rx_fixed_delay: Fixed Rx latency measured in 1/100th nanoseconds + * + * Table of constants used during as part of the Vernier calibration of the Tx + * and Rx timestamps. This includes frequency values used to compute TUs per + * PAR/PCS clock cycle, and static delay values measured during hardware + * design. + * + * Note that some values are not used for all link speeds, and the + * P_REG_DESK_PAR* registers may represent different clock markers at + * different link speeds, either the deskew marker for multi-lane link speeds + * or the Reed Solomon gearbox marker for RS-FEC. + */ +struct ice_vernier_info_e822 { + u32 tx_par_clk; + u32 rx_par_clk; + u32 tx_pcs_clk; + u32 rx_pcs_clk; + u32 tx_desk_rsgb_par; + u32 rx_desk_rsgb_par; + u32 tx_desk_rsgb_pcs; + u32 rx_desk_rsgb_pcs; + u32 tx_fixed_delay; + u32 pmd_adj_divisor; + u32 rx_fixed_delay; +}; + +/** + * struct ice_cgu_pll_params_e822 + * @refclk_pre_div: Reference clock pre-divisor + * @feedback_div: Feedback divisor + * @frac_n_div: Fractional divisor + * @post_pll_div: Post PLL divisor + * + * Clock Generation Unit parameters used to program the PLL based on the + * selected TIME_REF frequency. 
+ */ +struct ice_cgu_pll_params_e822 { + u32 refclk_pre_div; + u32 feedback_div; + u32 frac_n_div; + u32 post_pll_div; +}; + +extern const struct +ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; + +enum ice_e810t_cgu_dpll { + ICE_CGU_DPLL_SYNCE, + ICE_CGU_DPLL_PTP, + ICE_CGU_DPLL_MAX +}; + +enum ice_cgu_state { + ICE_CGU_STATE_UNKNOWN = -1, + ICE_CGU_STATE_INVALID, /* state is not valid */ + ICE_CGU_STATE_FREERUN, /* clock is free-running */ + ICE_CGU_STATE_LOCKED, /* clock is locked to the reference, + * but the holdover memory is not valid + */ + ICE_CGU_STATE_LOCKED_HO_ACQ, /* clock is locked to the reference + * and holdover memory is valid + */ + ICE_CGU_STATE_HOLDOVER, /* clock is in holdover mode */ + ICE_CGU_STATE_MAX +}; + +#define MAX_CGU_STATE_NAME_LEN 14 +struct ice_cgu_state_desc { + char name[MAX_CGU_STATE_NAME_LEN]; + enum ice_cgu_state state; +}; + +#define MAX_CGU_PIN_NAME_LEN 16 +struct ice_cgu_pin_desc { + char name[MAX_CGU_PIN_NAME_LEN]; + u8 index; +}; + +enum ice_zl_cgu_pins { + ZL_REF0P = 0, + ZL_REF0N, + ZL_REF1P, + ZL_REF1N, + ZL_REF2P, + ZL_REF2N, + ZL_REF3P, + ZL_REF3N, + ZL_REF4P, + ZL_REF4N, + NUM_ZL_CGU_PINS +}; + +enum ice_si_cgu_pins { + SI_REF0P = 0, + SI_REF0N, + SI_REF1P, + SI_REF1N, + SI_REF2P, + SI_REF2N, + SI_REF3, + SI_REF4, + NUM_SI_CGU_PINS +}; + +#define E810C_QSFP_C827_0_HANDLE 2 +#define E810C_QSFP_C827_1_HANDLE 3 +enum ice_e810_c827_idx { + C827_0, + C827_1 +}; + /* Table of constants related to possible TIME_REF sources */ extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ]; +/* Table of constants for Vernier calibration on E822 */ +extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD]; + /* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for * the E810 devices. Based off of a PLL with an 812.5 MHz frequency. 
*/ @@ -68,36 +195,70 @@ u64 ice_ptp_read_src_incval(struct ice_hw *hw); bool ice_ptp_lock(struct ice_hw *hw); void ice_ptp_unlock(struct ice_hw *hw); void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd); -enum ice_status ice_ptp_init_time(struct ice_hw *hw, u64 time); -enum ice_status ice_ptp_write_incval(struct ice_hw *hw, u64 incval); -enum ice_status ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval); -enum ice_status ice_ptp_adj_clock(struct ice_hw *hw, s32 adj, bool lock_sbq); -enum ice_status +int ice_ptp_init_time(struct ice_hw *hw, u64 time); +int ice_ptp_write_incval(struct ice_hw *hw, u64 incval); +int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval); +int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj, bool lock_sbq); +int ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj); -enum ice_status +int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp); -enum ice_status +int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx); +void ice_ptp_reset_ts_memory(struct ice_hw *hw); +int ice_ptp_init_phc(struct ice_hw *hw); +int +ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready); /* E822 family functions */ -enum ice_status +int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val); -enum ice_status +int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val); -enum ice_status +int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val); -enum ice_status +int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val); -enum ice_status +int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time, bool lock_sbq); -enum ice_status +int ice_ptp_read_phy_incval_e822(struct ice_hw *hw, u8 port, u64 *incval); -enum ice_status -ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts); -enum ice_status -ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd, - bool lock_sbq); +int +ice_ptp_read_port_capture_e822(struct ice_hw *hw, u8 port, + u64 *tx_ts, u64 *rx_ts); +int +ice_ptp_one_port_cmd_e822(struct ice_hw *hw, u8 port, + enum ice_ptp_tmr_cmd cmd, bool lock_sbq); +int +ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, + enum ice_clk_src clk_src); +void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad); + +/** + * ice_e822_time_ref - Get the current TIME_REF from capabilities + * @hw: pointer to the HW structure + * + * Returns the current TIME_REF from the capabilities structure. + */ +static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw) +{ + return hw->func_caps.ts_func_info.time_ref; +} + +/** + * ice_set_e822_time_ref - Set new TIME_REF + * @hw: pointer to the HW structure + * @time_ref: new TIME_REF to set + * + * Update the TIME_REF in the capabilities structure in response to some + * change, such as an update to the CGU registers. 
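The getter above and the setter that follows are meant to bracket a call to ice_cfg_cgu_pll_e822(): reprogram the CGU first, then refresh the cached capability so later lookups see the new TIME_REF. A short hedged sketch (the frequency and clock source values are caller-supplied placeholders):

	/* Illustrative only: switch the E822 CGU to a new TIME_REF frequency. */
	static int ice_cgu_retune_sketch(struct ice_hw *hw,
					 enum ice_time_ref_freq new_freq,
					 enum ice_clk_src clk_src)
	{
		int err;

		if (new_freq == ice_e822_time_ref(hw))
			return 0;	/* already running at the requested frequency */

		err = ice_cfg_cgu_pll_e822(hw, new_freq, clk_src);
		if (err)
			return err;

		/* Keep the cached capability in sync with the CGU registers */
		ice_set_e822_time_ref(hw, new_freq);

		return 0;
	}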
+ */ +static inline void +ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref) +{ + hw->func_caps.ts_func_info.time_ref = time_ref; +} static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref) { @@ -115,15 +276,45 @@ static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref) } /* E822 Vernier calibration functions */ -enum ice_status ice_ptp_set_vernier_wl(struct ice_hw *hw); -enum ice_status +int ice_ptp_set_vernier_wl(struct ice_hw *hw); +int ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, enum ice_ptp_link_spd *link_out, enum ice_ptp_fec_mode *fec_out); void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port); +int +ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset); +int +ice_start_phy_timer_e822(struct ice_hw *hw, u8 port); +int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port); +int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port); +int ice_phy_calc_vernier_e822(struct ice_hw *hw, u8 port); /* E810 family functions */ -enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw); +bool ice_is_phy_rclk_present(struct ice_hw *hw); +bool ice_is_clock_mux_present_e810t(struct ice_hw *hw); +int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx); +bool ice_is_gps_present_e810t(struct ice_hw *hw); +int ice_ptp_init_phy_e810(struct ice_hw *hw); +int +ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data); +int +ice_write_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 data); +int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data); +int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data); +bool ice_is_pca9575_present(struct ice_hw *hw); + +void +ice_ptp_process_cgu_err(struct ice_hw *hw, struct ice_rq_event_info *event); +bool ice_is_cgu_present(struct ice_hw *hw); +const char *ice_cgu_state_to_name(int state); +enum ice_cgu_state +ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx, u8 *pin, s64 *phase_offset, + enum ice_cgu_state last_dpll_state); +const char *ice_zl_pin_idx_to_name_e810t(u8 pin); +const char *ice_pin_idx_to_name_e823(struct ice_hw *hw, u8 pin); + +int ice_ptp_init_phy_cfg(struct ice_hw *hw); #define PFTSYN_SEM_BYTES 4 @@ -220,8 +411,8 @@ enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw); #define P_REG_TIMETUS_L 0x410 #define P_REG_TIMETUS_U 0x414 -#define P_REG_TIMETUS_LOW_M 0xFF -#define P_REG_TIMETUS_HIGH_S 8 +#define P_REG_40B_LOW_M 0xFF +#define P_REG_40B_HIGH_S 8 /* PHY window length registers */ #define P_REG_WL 0x40C @@ -333,8 +524,8 @@ enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw); #define INCVAL_HIGH_M 0xFF /* Timestamp block macros */ +#define TS_VALID BIT(0) #define TS_LOW_M 0xFFFFFFFF -#define TS_HIGH_M 0xFF #define TS_HIGH_S 32 #define TS_PHY_LOW_M 0xFF @@ -344,6 +535,16 @@ enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw); #define BYTES_PER_IDX_ADDR_L_U 8 #define BYTES_PER_IDX_ADDR_L 4 +/* Tx timestamp low latency read definitions */ +#define TS_LL_READ_RETRIES 200 +#define TS_LL_READ_TS BIT(31) +#define TS_LL_READ_TS_IDX_S 24 +#define TS_LL_READ_TS_IDX_M ICE_M(0x3F, 0) +#define TS_LL_READ_TS_IDX(__idx) (TS_LL_READ_TS | \ + (((__idx) & TS_LL_READ_TS_IDX_M) << \ + TS_LL_READ_TS_IDX_S)) +#define TS_LL_READ_TS_HIGH_S 16 + /* Internal PHY timestamp address */ #define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U)) #define TS_H(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U + \ @@ -356,4 +557,28 @@ enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw); #define LOW_TX_MEMORY_BANK_START 0x03090000 #define HIGH_TX_MEMORY_BANK_START 0x03090004 +/* 
E810T PCA9575 IO controller registers */ +#define ICE_PCA9575_P0_IN 0x0 +#define ICE_PCA9575_P1_IN 0x1 +#define ICE_PCA9575_P0_CFG 0x8 +#define ICE_PCA9575_P1_CFG 0x9 +#define ICE_PCA9575_P0_OUT 0xA +#define ICE_PCA9575_P1_OUT 0xB + +/* E810T PCA9575 IO controller pin control */ +#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4) +#define ICE_E810T_P1_SMA1_DIR_EN BIT(4) +#define ICE_E810T_P1_SMA1_TX_EN BIT(5) +#define ICE_E810T_P1_SMA2_UFL2_RX_DIS BIT(3) +#define ICE_E810T_P1_SMA2_DIR_EN BIT(6) +#define ICE_E810T_P1_SMA2_TX_EN BIT(7) + +#define ICE_E810T_SMA_MIN_BIT 3 +#define ICE_E810T_SMA_MAX_BIT 7 +#define ICE_E810T_P1_OFFSET 8 + +/* E810T ZL30632 DPLL pin control */ +#define ICE_ZL30632_CGU_PIN_FREQ_10MHZ 10000000 +#define ICE_ZL30632_CGU_PIN_FREQ_1HZ 1 + #endif /* _ICE_PTP_HW_H_ */ diff --git a/drivers/thirdparty/ice/ice_ptype_mk.c b/drivers/thirdparty/ice/ice_ptype_mk.c new file mode 100644 index 000000000000..24dc8b8fb9be --- /dev/null +++ b/drivers/thirdparty/ice/ice_ptype_mk.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#include "ice_common.h" +#include "ice_parser_util.h" + +#define ICE_PTYPE_MK_TCAM_TABLE_SIZE 1024 + +/** + * ice_ptype_mk_tcam_dump - dump an ptype marker tcam info_ + * @hw: pointer to the hardware structure + * @item: ptype marker tcam to dump + */ +void ice_ptype_mk_tcam_dump(struct ice_hw *hw, + struct ice_ptype_mk_tcam_item *item) +{ + int i; + + dev_info(ice_hw_to_dev(hw), "address = %d\n", item->address); + dev_info(ice_hw_to_dev(hw), "ptype = %d\n", item->ptype); + dev_info(ice_hw_to_dev(hw), "key :"); + for (i = 0; i < 10; i++) + dev_info(ice_hw_to_dev(hw), "%02x ", item->key[i]); + dev_info(ice_hw_to_dev(hw), "\n"); + dev_info(ice_hw_to_dev(hw), "key_inv:"); + for (i = 0; i < 10; i++) + dev_info(ice_hw_to_dev(hw), "%02x ", item->key_inv[i]); + dev_info(ice_hw_to_dev(hw), "\n"); +} + +static void _parse_ptype_mk_tcam_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int size) +{ + ice_parse_item_dflt(hw, idx, item, data, size); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_ptype_mk_tcam_dump(hw, + (struct ice_ptype_mk_tcam_item *)item); +} + +/** + * ice_ptype_mk_tcam_table_get - create a ptype marker tcam table + * @hw: pointer to the hardware structure + */ +struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw) +{ + return (struct ice_ptype_mk_tcam_item *) + ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_PTYPE, + sizeof(struct ice_ptype_mk_tcam_item), + ICE_PTYPE_MK_TCAM_TABLE_SIZE, + ice_parser_sect_item_get, + _parse_ptype_mk_tcam_item, true); +} + +/** + * ice_ptype_mk_tcam_match - match a pattern on a ptype marker tcam table + * @table: ptype marker tcam table to search + * @pat: pattern to match + * @len: length of the pattern + */ +struct ice_ptype_mk_tcam_item * +ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table, + u8 *pat, int len) +{ + int i; + + for (i = 0; i < ICE_PTYPE_MK_TCAM_TABLE_SIZE; i++) { + struct ice_ptype_mk_tcam_item *item = &table[i]; + + if (ice_ternary_match(item->key, item->key_inv, pat, len)) + return item; + } + + return NULL; +} diff --git a/drivers/thirdparty/ice/ice_ptype_mk.h b/drivers/thirdparty/ice/ice_ptype_mk.h new file mode 100644 index 000000000000..b47bd5389d69 --- /dev/null +++ b/drivers/thirdparty/ice/ice_ptype_mk.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _ICE_PTYPE_MK_H_ +#define _ICE_PTYPE_MK_H_ + +struct ice_ptype_mk_tcam_item { + u16 address; + u16 ptype; + u8 key[10]; + u8 key_inv[10]; +}; + +void ice_ptype_mk_tcam_dump(struct ice_hw *hw, + struct ice_ptype_mk_tcam_item *item); +struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw); +struct ice_ptype_mk_tcam_item * +ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table, + u8 *pat, int len); +#endif /* _ICE_PTYPE_MK_H_ */ diff --git a/drivers/thirdparty/ice/ice_repr.c b/drivers/thirdparty/ice/ice_repr.c index 0f4d9b6e8a33..bc00dadc32a2 100644 --- a/drivers/thirdparty/ice/ice_repr.c +++ b/drivers/thirdparty/ice/ice_repr.c @@ -6,8 +6,9 @@ #if IS_ENABLED(CONFIG_NET_DEVLINK) #include "ice_devlink.h" #endif /* CONFIG_NET_DEVLINK */ -#include "ice_virtchnl_pf.h" +#include "ice_sriov.h" #include "ice_tc_lib.h" +#include "ice_lib.h" #ifdef HAVE_NDO_GET_PHYS_PORT_NAME /** @@ -34,7 +35,7 @@ ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len) #if IS_ENABLED(CONFIG_NET_DEVLINK) /* Devlink port is registered and devlink core is taking care of name formatting. */ - if (repr->vf->devlink_port.registered) + if (repr->vf->devlink_port.devlink) return -EOPNOTSUPP; #endif /* CONFIG_NET_DEVLINK */ @@ -62,7 +63,7 @@ ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) struct ice_eth_stats *eth_stats; struct ice_vsi *vsi; - if (ice_check_vf_ready_for_cfg(np->repr->vf)) + if (ice_is_vf_disabled(np->repr->vf)) #ifdef HAVE_VOID_NDO_GET_STATS64 return; #else @@ -108,7 +109,7 @@ struct ice_repr *ice_netdev_to_repr(struct net_device *netdev) * interface is made active by the system (IFF_UP). Corresponding * VF is notified about link status change. * - * Returns 0 on success, negative value on failure + * Returns 0 on success */ static int ice_repr_open(struct net_device *netdev) { @@ -134,7 +135,7 @@ static int ice_repr_open(struct net_device *netdev) * interface is de-activated by the system. Corresponding * VF is notified about link status change. * - * Returns 0 on success, negative value on failure + * Returns 0 on success */ static int ice_repr_stop(struct net_device *netdev) { @@ -161,6 +162,57 @@ ice_repr_get_devlink_port(struct net_device *netdev) return &repr->vf->devlink_port; } #endif /* CONFIG_NET_DEVLINK && HAVE_DEVLINK_PORT_ATTR_PCI_VF*/ +#if defined(HAVE_NDO_OFFLOAD_STATS) || defined(HAVE_RHEL7_EXTENDED_OFFLOAD_STATS) +/** + * ice_repr_sp_stats64 - get slow path stats for port representor + * @dev: network interface device structure + * @stats: netlink stats structure + * + * RX/TX stats are being swapped here to be consistent with VF stats. In slow + * path, port representor receives data when the corresponding VF is sending it + * (and vice versa), TX and RX bytes/packets are effectively swapped on port + * representor. 
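+ * The counters are read from np->vsi rings indexed by the VF ID:
+ * tx_rings[vf_id] supplies the representor RX counters, rx_rings[vf_id]
+ * supplies the TX counters, and tx_dropped is derived from the RX ring's
+ * buffer allocation failures.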
+ */ +static int +ice_repr_sp_stats64(const struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + int vf_id = np->repr->vf->vf_id; + struct ice_ring *ring; + u64 pkts, bytes; + + ring = np->vsi->tx_rings[vf_id]; + ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); + stats->rx_packets = pkts; + stats->rx_bytes = bytes; + + ring = np->vsi->rx_rings[vf_id]; + ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); + stats->tx_packets = pkts; + stats->tx_bytes = bytes; + stats->tx_dropped = ring->rx_stats.alloc_page_failed + + ring->rx_stats.alloc_buf_failed; + + return 0; +} + +static bool +ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id) +{ + return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT; +} + +static int +ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp) +{ + if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT) + return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp); + + return -EINVAL; +} +#endif /* HAVE_NDO_OFFLOAD_STATS || HAVE_RHEL7_EXTENDED_OFFLOAD_STATS */ #ifdef HAVE_TC_SETUP_CLSFLOWER static int @@ -234,7 +286,27 @@ ice_repr_setup_tc(struct net_device *netdev, u32 __always_unused handle, return -EOPNOTSUPP; } } -#endif /* ESWITCH_SUPPORT */ +#endif /* HAVE_TC_SETUP_CLSFLOWER */ + +/** + * ice_repr_change_mtu - NDO callback to change the MTU on port representor + * @netdev: network interface device structure + * @new_mtu: new value for MTU + * + * Returns 0 on success, negative on failure + */ +static int ice_repr_change_mtu(struct net_device *netdev, int new_mtu) +{ + int err; + + err = ice_check_mtu_valid(netdev, new_mtu); + if (err) + return err; + + netdev->mtu = (unsigned int)new_mtu; + + return 0; +} static const struct net_device_ops ice_repr_netdev_ops = { #ifdef HAVE_NDO_GET_PHYS_PORT_NAME @@ -245,6 +317,11 @@ static const struct net_device_ops ice_repr_netdev_ops = { .ndo_stop = ice_repr_stop, #if IS_ENABLED(CONFIG_NET_DEVLINK) .ndo_start_xmit = ice_eswitch_port_start_xmit, +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + .extended.ndo_change_mtu = ice_repr_change_mtu, +#else + .ndo_change_mtu = ice_repr_change_mtu, +#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */ #ifdef HAVE_DEVLINK_PORT_ATTR_PCI_VF .ndo_get_devlink_port = ice_repr_get_devlink_port, #endif /* HAVE_DEVLINK_PORT_ATTR_PCI_VF */ @@ -256,6 +333,13 @@ static const struct net_device_ops ice_repr_netdev_ops = { #endif /* HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC */ #endif /* HAVE_TC_SETUP_CLSFLOWER */ #endif /* CONFIG_NET_DEVLINK */ +#ifdef HAVE_NDO_OFFLOAD_STATS + .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats, + .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats, +#elif defined(HAVE_RHEL7_EXTENDED_OFFLOAD_STATS) + .extended.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats, + .extended.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats, +#endif }; /** @@ -298,19 +382,30 @@ static int ice_repr_add(struct ice_vf *vf) struct ice_q_vector *q_vector; struct ice_netdev_priv *np; struct ice_repr *repr; + struct ice_vsi *vsi; int err; + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + repr = kzalloc(sizeof(*repr), GFP_KERNEL); if (!repr) return -ENOMEM; + repr->mac_rule = kzalloc(sizeof(*repr->mac_rule), GFP_KERNEL); + if (!repr->mac_rule) { + err = -ENOMEM; + goto err_alloc_rule; + } + repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv)); if (!repr->netdev) { err = -ENOMEM; goto err_alloc; } - repr->src_vsi = ice_get_vf_vsi(vf); + repr->src_vsi = vsi; repr->vf = vf; vf->repr = 
repr; np = netdev_priv(repr->netdev); @@ -329,8 +424,19 @@ static int ice_repr_add(struct ice_vf *vf) if (err) goto err_devlink; #endif /* HAVE_DEVLINK_PORT_ATTR_PCI_VF */ + +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + repr->netdev->extended->min_mtu = ETH_MIN_MTU; + repr->netdev->extended->max_mtu = ICE_MAX_MTU; +#else + repr->netdev->min_mtu = ETH_MIN_MTU; + repr->netdev->max_mtu = ICE_MAX_MTU; +#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */ +#endif /* HAVE_NETDEVICE_MIN_MAX_MTU */ #endif /* CONFIG_NET_DEVLINK */ + SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf)); err = ice_repr_reg_netdev(repr->netdev); if (err) goto err_netdev; @@ -341,6 +447,8 @@ static int ice_repr_add(struct ice_vf *vf) #endif /* HAVE_DEVLINK_PORT_ATTR_PCI_VF */ #endif /* CONFIG_NET_DEVLINK */ + ice_virtchnl_set_repr_ops(vf); + return 0; err_netdev: @@ -356,6 +464,9 @@ err_alloc_q_vector: free_netdev(repr->netdev); repr->netdev = NULL; err_alloc: + kfree(repr->mac_rule); + repr->mac_rule = NULL; +err_alloc_rule: kfree(repr); vf->repr = NULL; return err; @@ -367,6 +478,9 @@ err_alloc: */ static void ice_repr_rem(struct ice_vf *vf) { + if (!vf->repr) + return; + #if IS_ENABLED(CONFIG_NET_DEVLINK) #ifdef HAVE_DEVLINK_PORT_ATTR_PCI_VF ice_devlink_destroy_vf_port(vf); @@ -377,42 +491,12 @@ static void ice_repr_rem(struct ice_vf *vf) unregister_netdev(vf->repr->netdev); free_netdev(vf->repr->netdev); vf->repr->netdev = NULL; + kfree(vf->repr->mac_rule); + vf->repr->mac_rule = NULL; kfree(vf->repr); vf->repr = NULL; -} - -/** - * ice_repr_add_for_all_vfs - add port representor for all VFs - * @pf: pointer to PF structure - */ -int ice_repr_add_for_all_vfs(struct ice_pf *pf) -{ - int err; - int i; - - - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; - - err = ice_repr_add(vf); - if (err) - goto err; - - ice_vc_change_ops_to_repr(&vf->vc_ops); - } - - return 0; - -err: - for (i = i - 1; i >= 0; i--) { - struct ice_vf *vf = &pf->vf[i]; - - ice_repr_rem(vf); - ice_vc_set_dflt_vf_ops(&vf->vc_ops); - } - - return err; + ice_virtchnl_set_dflt_ops(vf); } /** @@ -421,15 +505,39 @@ err: */ void ice_repr_rem_from_all_vfs(struct ice_pf *pf) { - int i; + struct ice_vf *vf; + unsigned int bkt; + lockdep_assert_held(&pf->vfs.table_lock); - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; - + ice_for_each_vf(pf, bkt, vf) ice_repr_rem(vf); - ice_vc_set_dflt_vf_ops(&vf->vc_ops); +} + +/** + * ice_repr_add_for_all_vfs - add port representor for all VFs + * @pf: pointer to PF structure + */ +int ice_repr_add_for_all_vfs(struct ice_pf *pf) +{ + struct ice_vf *vf; + unsigned int bkt; + int err; + + lockdep_assert_held(&pf->vfs.table_lock); + + ice_for_each_vf(pf, bkt, vf) { + err = ice_repr_add(vf); + if (err) + goto err; } + + return 0; + +err: + ice_repr_rem_from_all_vfs(pf); + + return err; } /** diff --git a/drivers/thirdparty/ice/ice_repr.h b/drivers/thirdparty/ice/ice_repr.h index a20c9d5ebe4f..f861d41b717e 100644 --- a/drivers/thirdparty/ice/ice_repr.h +++ b/drivers/thirdparty/ice/ice_repr.h @@ -3,7 +3,6 @@ #ifndef _ICE_REPR_H_ #define _ICE_REPR_H_ -#include "ice.h" struct ice_repr { struct ice_vsi *src_vsi; @@ -11,6 +10,9 @@ struct ice_repr { struct ice_q_vector *q_vector; struct net_device *netdev; struct metadata_dst *dst; + /* info about slow path MAC rule */ + struct ice_rule_query_data *mac_rule; + u8 rule_added; }; int ice_repr_add_for_all_vfs(struct ice_pf *pf); diff --git a/drivers/thirdparty/ice/ice_sched.c b/drivers/thirdparty/ice/ice_sched.c index 
bcfed501e999..72703c5c052d 100644 --- a/drivers/thirdparty/ice/ice_sched.c +++ b/drivers/thirdparty/ice/ice_sched.c @@ -3,8 +3,6 @@ #include "ice_sched.h" - - /** * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB * @pi: port information structure @@ -13,7 +11,7 @@ * This function inserts the root node of the scheduling tree topology * to the SW DB. */ -static enum ice_status +static int ice_sched_add_root_node(struct ice_port_info *pi, struct ice_aqc_txsched_elem_data *info) { @@ -21,20 +19,20 @@ ice_sched_add_root_node(struct ice_port_info *pi, struct ice_hw *hw; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL); if (!root) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* coverity[suspicious_sizeof] */ root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], sizeof(*root), GFP_KERNEL); if (!root->children) { devm_kfree(ice_hw_to_dev(hw), root); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } memcpy(&root->info, info, sizeof(*info)); @@ -98,14 +96,14 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) * * This function sends a scheduling elements cmd (cmd_opc) */ -static enum ice_status +static int ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, u16 elems_req, void *buf, u16 buf_size, u16 *elems_resp, struct ice_sq_cd *cd) { struct ice_aqc_sched_elem_cmd *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.sched_elem_cmd; ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc); @@ -129,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, * * Query scheduling elements (0x0404) */ -enum ice_status +int ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) @@ -147,18 +145,18 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, * * This function inserts a scheduler node to the SW DB. 
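 * The parent node is looked up by the parent TEID in @info, and the
 * element data is queried back from FW before the SW copy is allocated,
 * so the SW DB mirrors what HW actually programmed.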
*/ -enum ice_status +int ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info) { struct ice_aqc_txsched_elem_data elem; struct ice_sched_node *parent; struct ice_sched_node *node; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; @@ -168,7 +166,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, if (!parent) { ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n", le32_to_cpu(info->parent_teid)); - return ICE_ERR_PARAM; + return -EINVAL; } /* query the current node information from FW before adding it @@ -179,7 +177,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, return status; node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL); if (!node) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; if (hw->max_children[layer]) { /* coverity[suspicious_sizeof] */ node->children = devm_kcalloc(ice_hw_to_dev(hw), @@ -187,7 +185,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, sizeof(*node), GFP_KERNEL); if (!node->children) { devm_kfree(ice_hw_to_dev(hw), node); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } } @@ -210,7 +208,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, * * Delete scheduling elements (0x040F) */ -static enum ice_status +static int ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_delete_elem *buf, u16 buf_size, u16 *grps_del, struct ice_sq_cd *cd) @@ -229,19 +227,19 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, * * This function remove nodes from HW */ -static enum ice_status +static int ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, u16 num_nodes, u32 *node_teids) { struct ice_aqc_delete_elem *buf; u16 i, num_groups_removed = 0; - enum ice_status status; u16 buf_size; + int status; buf_size = struct_size(buf, teid, num_nodes); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = cpu_to_le16(num_nodes); @@ -370,14 +368,14 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) * * Get default scheduler topology (0x400) */ -enum ice_status +int ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, struct ice_aqc_get_topo_elem *buf, u16 buf_size, u8 *num_branches, struct ice_sq_cd *cd) { struct ice_aqc_get_topo *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.get_topo; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo); @@ -400,7 +398,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, * * Add scheduling elements (0x0401) */ -static enum ice_status +static int ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_add_elem *buf, u16 buf_size, u16 *grps_added, struct ice_sq_cd *cd) @@ -421,7 +419,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, * * Configure scheduling elements (0x0403) */ -static enum ice_status +static int ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_cfgd, struct ice_sq_cd *cd) @@ -442,7 +440,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, * * Move scheduling elements (0x0408) */ -static enum ice_status +int ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_move_elem *buf, u16 buf_size, u16 *grps_movd, struct ice_sq_cd *cd) @@ -463,7 +461,7 @@ 
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, * * Suspend scheduling elements (0x0409) */ -static enum ice_status +static int ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { @@ -483,7 +481,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, * * resume scheduling elements (0x040A) */ -static enum ice_status +static int ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { @@ -501,7 +499,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, * * Query scheduler resource allocation (0x0412) */ -static enum ice_status +static int ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, struct ice_aqc_query_txsched_res_resp *buf, struct ice_sq_cd *cd) @@ -521,18 +519,18 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, * * This function suspends or resumes HW nodes */ -static enum ice_status +static int ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, bool suspend) { u16 i, buf_size, num_elem_ret = 0; - enum ice_status status; __le32 *buf; + int status; buf_size = sizeof(*buf) * num_nodes; buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_nodes; i++) buf[i] = cpu_to_le32(node_teids[i]); @@ -559,7 +557,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, * @tc: TC number * @new_numqs: number of queues */ -static enum ice_status +static int ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; @@ -567,7 +565,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; /* allocate LAN queue contexts */ if (!vsi_ctx->lan_q_ctx[tc]) { vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), @@ -575,7 +573,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) sizeof(*q_ctx), GFP_KERNEL); if (!vsi_ctx->lan_q_ctx[tc]) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; vsi_ctx->num_lan_q_entries[tc] = new_numqs; return 0; } @@ -586,7 +584,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, sizeof(*q_ctx), GFP_KERNEL); if (!q_ctx) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], prev_num * sizeof(*q_ctx)); devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]); @@ -603,7 +601,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) * @tc: TC number * @new_numqs: number of queues */ -static enum ice_status +static int ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; @@ -611,7 +609,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; /* allocate RDMA queue contexts */ if (!vsi_ctx->rdma_q_ctx[tc]) { vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), @@ -619,7 +617,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) sizeof(*q_ctx), GFP_KERNEL); if (!vsi_ctx->rdma_q_ctx[tc]) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; vsi_ctx->num_rdma_q_entries[tc] = new_numqs; return 0; } @@ -630,7 
+628,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, sizeof(*q_ctx), GFP_KERNEL); if (!q_ctx) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc], prev_num * sizeof(*q_ctx)); devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]); @@ -652,14 +650,14 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) * * RL profile function to add, query, or remove profile(s) */ -static enum ice_status +static int ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) { struct ice_aqc_rl_profile *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.rl_profile; @@ -683,7 +681,7 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, * * Add RL profile (0x0410) */ -static enum ice_status +static int ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_profiles_added, struct ice_sq_cd *cd) @@ -702,7 +700,7 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, * * Query RL profile (0x0411) */ -enum ice_status +int ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -722,7 +720,7 @@ ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles, * * Remove RL profile (0x0415) */ -static enum ice_status +static int ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_profiles_removed, struct ice_sq_cd *cd) @@ -741,24 +739,24 @@ ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, * its associated parameters from HW DB,and locally. The caller needs to * hold scheduler lock. 
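 * Returns -EBUSY while the profile is still referenced (prof_id_ref != 0);
 * otherwise the profile is removed from FW and unlinked from the profile
 * list.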
*/ -static enum ice_status +static int ice_sched_del_rl_profile(struct ice_hw *hw, struct ice_aqc_rl_profile_info *rl_info) { struct ice_aqc_rl_profile_elem *buf; u16 num_profiles_removed; - enum ice_status status; u16 num_profiles = 1; + int status; if (rl_info->prof_id_ref != 0) - return ICE_ERR_IN_USE; + return -EBUSY; /* Safe to remove profile ID */ buf = &rl_info->profile; status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf), &num_profiles_removed, NULL); if (status || num_profiles_removed != num_profiles) - return ICE_ERR_CFG; + return -EIO; /* Delete stale entry now */ list_del(&rl_info->list_entry); @@ -777,14 +775,13 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi) u16 ln; struct ice_hw *hw = pi->hw; - for (ln = 0; ln < hw->num_tx_sched_layers; ln++) { struct ice_aqc_rl_profile_info *rl_prof_elem; struct ice_aqc_rl_profile_info *rl_prof_tmp; list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, &hw->rl_prof_list[ln], list_entry) { - enum ice_status status; + int status; rl_prof_elem->prof_id_ref = 0; status = ice_sched_del_rl_profile(hw, rl_prof_elem); @@ -884,6 +881,33 @@ void ice_sched_cleanup_all(struct ice_hw *hw) hw->max_cgds = 0; } +/** + * ice_aq_cfg_node_attr - configure nodes' per-cone flattening attributes + * @hw: pointer to the HW struct + * @num_nodes: the number of nodes whose attributes to configure + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @cd: pointer to command details structure or NULL + * + * Configure Node Attributes (0x0417) + */ +int +ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes, + struct ice_aqc_node_attr_elem *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_node_attr *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.node_attr; + ice_fill_dflt_direct_cmd_desc(&desc, + ice_aqc_opc_cfg_node_attr); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + cmd->num_entries = cpu_to_le16(num_nodes); + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +} + /** * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping * @hw: pointer to the HW struct @@ -894,7 +918,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw) * * Configure L2 Node CGD (0x0414) */ -enum ice_status +int ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes, struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -910,7 +934,6 @@ ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes, return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); } - /** * ice_sched_add_elems - add nodes to HW and SW DB * @pi: port information structure @@ -923,7 +946,7 @@ ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes, * * This function add nodes to HW as well as to SW DB for a given layer */ -static enum ice_status +static int ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, u16 num_nodes, u16 *num_nodes_added, u32 *first_node_teid) @@ -931,15 +954,15 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *prev, *new_node; struct ice_aqc_add_elem *buf; u16 i, num_groups_added = 0; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; u16 buf_size; u32 teid; buf_size = struct_size(buf, generic, num_nodes); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = cpu_to_le16(num_nodes); @@ -966,7 +989,7 @@ 
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", hw->adminq.sq_last_status); devm_kfree(ice_hw_to_dev(hw), buf); - return ICE_ERR_CFG; + return -EIO; } *num_nodes_added = num_nodes; @@ -1022,7 +1045,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, * * Add nodes into specific hw layer. */ -static enum ice_status +static int ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, @@ -1037,7 +1060,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, return 0; if (!parent || layer < pi->hw->sw_entry_point_layer) - return ICE_ERR_PARAM; + return -EINVAL; /* max children per node per layer */ max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; @@ -1046,8 +1069,8 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, if ((parent->num_children + num_nodes) > max_child_nodes) { /* Fail if the parent is a TC node */ if (parent == tc_node) - return ICE_ERR_CFG; - return ICE_ERR_MAX_LIMIT; + return -EIO; + return -ENOSPC; } return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, @@ -1066,7 +1089,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, * * This function add nodes to a given layer. */ -static enum ice_status +static int ice_sched_add_nodes_to_layer(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, @@ -1075,12 +1098,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, { u32 *first_teid_ptr = first_node_teid; u16 new_num_nodes = num_nodes; - enum ice_status status = 0; + int status = 0; *num_nodes_added = 0; while (*num_nodes_added < num_nodes) { u16 max_child_nodes, num_added = 0; +#ifdef __CHECKER__ /* cppcheck-suppress unusedVariable */ +#endif /* __CHECKER__ */ u32 temp; status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, @@ -1093,14 +1118,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, if (*num_nodes_added > num_nodes) { ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes, *num_nodes_added); - status = ICE_ERR_CFG; + status = -EIO; break; } /* break if all the nodes are added successfully */ if (!status && (*num_nodes_added == num_nodes)) break; /* break if the error is not max limit */ - if (status && status != ICE_ERR_MAX_LIMIT) + if (status && status != -ENOSPC) break; /* Exceeded the max children */ max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; @@ -1150,12 +1175,11 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw) * 5 or less sw_entry_point_layer */ /* calculate the VSI layer based on number of layers. */ - if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) { - u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; - - if (layer > hw->sw_entry_point_layer) - return layer; - } + if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) + return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; + else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) + /* qgroup and VSI layers are same */ + return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; return hw->sw_entry_point_layer; } @@ -1172,16 +1196,11 @@ static u8 ice_sched_get_agg_layer(struct ice_hw *hw) * 7 or less sw_entry_point_layer */ /* calculate the aggregator layer based on number of layers. 
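 * With the 9-layer topology this is num_tx_sched_layers minus
 * ICE_AGG_LAYER_OFFSET; otherwise the SW entry point layer is returned.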
*/ - if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) { - u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; - - if (layer > hw->sw_entry_point_layer) - return layer; - } + if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) + return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; return hw->sw_entry_point_layer; } - /** * ice_rm_dflt_leaf_node - remove the default leaf node in the tree * @pi: port information structure @@ -1201,7 +1220,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) } if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { u32 teid = le32_to_cpu(node->info.node_teid); - enum ice_status status; + int status; /* remove the default leaf node */ status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); @@ -1247,24 +1266,23 @@ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) * resources, default topology created by firmware and storing the information * in SW DB. */ -enum ice_status ice_sched_init_port(struct ice_port_info *pi) +int ice_sched_init_port(struct ice_port_info *pi) { struct ice_aqc_get_topo_elem *buf; - enum ice_status status; struct ice_hw *hw; u8 num_branches; u16 num_elems; + int status; u8 i, j; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; - /* Query the Default Topology from FW */ buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Query default scheduling tree topology */ status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, @@ -1276,7 +1294,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) { ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n", num_branches); - status = ICE_ERR_PARAM; + status = -EINVAL; goto err_init_port; } @@ -1287,7 +1305,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) { ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n", num_elems); - status = ICE_ERR_PARAM; + status = -EINVAL; goto err_init_port; } @@ -1376,27 +1394,28 @@ struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid) * * query FW for allocated scheduler resources and store in HW struct */ -enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) +int ice_sched_query_res_alloc(struct ice_hw *hw) { struct ice_aqc_query_txsched_res_resp *buf; - enum ice_status status = 0; __le16 max_sibl; - u8 i; + int status = 0; + u16 i; if (hw->layer_info) return status; buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL); if (status) goto sched_query_out; - hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels); + hw->num_tx_sched_layers = + (u8)le16_to_cpu(buf->sched_props.logical_levels); hw->num_tx_sched_phys_layers = - le16_to_cpu(buf->sched_props.phys_levels); + (u8)le16_to_cpu(buf->sched_props.phys_levels); hw->flattened_layers = buf->sched_props.flattening_bitmap; hw->max_cgds = buf->sched_props.max_pf_cgds; @@ -1416,11 +1435,10 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) (hw->num_tx_sched_layers * sizeof(*hw->layer_info)), GFP_KERNEL); if (!hw->layer_info) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto sched_query_out; } - sched_query_out: devm_kfree(ice_hw_to_dev(hw), buf); return status; @@ -1561,10 +1579,11 @@ 
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, { struct ice_sched_node *vsi_node, *qgrp_node; struct ice_vsi_ctx *vsi_ctx; + u8 qgrp_layer, vsi_layer; u16 max_children; - u8 qgrp_layer; qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); + vsi_layer = ice_sched_get_vsi_layer(pi->hw); max_children = pi->hw->max_children[qgrp_layer]; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); @@ -1575,6 +1594,12 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, if (!vsi_node) return NULL; + /* If the queue group and vsi layer are same then queues + * are all attached directly to VSI + */ + if (qgrp_layer == vsi_layer) + return vsi_node; + /* get the first queue group node from VSI sub-tree */ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); while (qgrp_node) { @@ -1661,8 +1686,8 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node) { struct ice_aqc_txsched_elem_data buf; - enum ice_status status; u32 node_teid; + int status; node_teid = le32_to_cpu(node->info.node_teid); status = ice_sched_query_elem(hw, node_teid, &buf); @@ -1717,14 +1742,13 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) * This function adds the VSI child nodes to tree. It gets called for * LAN and RDMA separately. */ -static enum ice_status +static int ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes, u8 owner) { struct ice_sched_node *parent, *node; struct ice_hw *hw = pi->hw; - enum ice_status status; u32 first_node_teid; u16 num_added = 0; u8 i, qgl, vsil; @@ -1733,15 +1757,17 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, vsil = ice_sched_get_vsi_layer(hw); parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); for (i = vsil + 1; i <= qgl; i++) { + int status; + if (!parent) - return ICE_ERR_CFG; + return -EIO; status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_added); if (status || num_nodes[i] != num_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -1821,27 +1847,28 @@ ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi, * This function adds the VSI supported nodes into Tx tree including the * VSI, its parent and intermediate nodes in below layers */ -static enum ice_status +static int ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes) { struct ice_sched_node *parent = tc_node; - enum ice_status status; u32 first_node_teid; u16 num_added = 0; u8 i, vsil; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; vsil = ice_sched_get_vsi_layer(pi->hw); for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { + int status; + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_added); if (status || num_nodes[i] != num_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -1853,7 +1880,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, parent = parent->children[0]; if (!parent) - return ICE_ERR_CFG; + return -EIO; if (i == vsil) parent->vsi_handle = vsi_handle; @@ -1870,7 +1897,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, * * This function adds a new VSI into scheduler tree 
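 * Support nodes are first counted per layer, from the SW entry point
 * layer down to the VSI layer, and then added beneath the TC node.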
*/ -static enum ice_status +static int ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) { u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; @@ -1878,7 +1905,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_PARAM; + return -EINVAL; /* calculate number of supported nodes needed for this VSI */ ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes); @@ -1898,7 +1925,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) * * This function updates the VSI child nodes based on the number of queues */ -static enum ice_status +static int ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 new_numqs, u8 owner) { @@ -1906,21 +1933,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *vsi_node; struct ice_sched_node *tc_node; struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; u16 prev_numqs; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) - return ICE_ERR_CFG; + return -EIO; vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; if (owner == ICE_SCHED_NODE_OWNER_LAN) prev_numqs = vsi_ctx->sched.max_lanq[tc]; @@ -1973,22 +2000,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, * enabled and VSI is in suspended state then resume the VSI back. If TC is * disabled then suspend the VSI if it is not already. */ -enum ice_status +int ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable) { struct ice_sched_node *vsi_node, *tc_node; struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); /* suspend the VSI if TC is not enabled */ @@ -2012,7 +2039,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) - return ICE_ERR_CFG; + return -EIO; vsi_ctx->sched.vsi_node[tc] = vsi_node; vsi_node->in_use = true; @@ -2097,11 +2124,11 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) * This function removes the VSI and its LAN or RDMA children nodes from the * scheduler tree. 
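 * Returns -EBUSY if leaf (queue) nodes are still attached to the VSI node
 * in any TC, i.e. the queues must be removed first.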
*/ -static enum ice_status +static int ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) { - enum ice_status status = ICE_ERR_PARAM; struct ice_vsi_ctx *vsi_ctx; + int status = -EINVAL; u8 i; ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); @@ -2126,7 +2153,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) if (ice_sched_is_leaf_node_present(vsi_node)) { ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i); - status = ICE_ERR_IN_USE; + status = -EBUSY; goto exit_sched_rm_vsi_cfg; } while (j < vsi_node->num_children) { @@ -2169,7 +2196,7 @@ exit_sched_rm_vsi_cfg: * This function clears the VSI and its LAN children nodes from scheduler tree * for all TCs. */ -enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) +int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); } @@ -2182,7 +2209,7 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) * This function clears the VSI and its RDMA children nodes from scheduler tree * for all TCs. */ -enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) +int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA); } @@ -2222,7 +2249,7 @@ bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node) * This function retrieves the tree topology from the firmware for a given * node TEID to the root node. */ -enum ice_status +int ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -2341,36 +2368,36 @@ ice_sched_update_parent(struct ice_sched_node *new_parent, * * This function move the child nodes to a given parent. */ -static enum ice_status +static int ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, u16 num_items, u32 *list) { struct ice_aqc_move_elem *buf; struct ice_sched_node *node; - enum ice_status status = 0; u16 i, grps_movd = 0; struct ice_hw *hw; + int status = 0; u16 buf_len; hw = pi->hw; if (!parent || !num_items) - return ICE_ERR_PARAM; + return -EINVAL; /* Does parent have enough space */ if (parent->num_children + num_items > hw->max_children[parent->tx_sched_layer]) - return ICE_ERR_AQ_FULL; + return -ENOSPC; buf_len = struct_size(buf, teid, 1); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_items; i++) { node = ice_sched_find_node_by_teid(pi->root, list[i]); if (!node) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto move_err_exit; } @@ -2381,7 +2408,7 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, status = ice_aq_move_sched_elems(hw, 1, buf, buf_len, &grps_movd, NULL); if (status && grps_movd != 1) { - status = ICE_ERR_CFG; + status = -EIO; goto move_err_exit; } @@ -2404,28 +2431,28 @@ move_err_exit: * This function moves a VSI to an aggregator node or its subtree. * Intermediate nodes may be created if required. 
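 * If the VSI node is already inside the aggregator's subtree nothing is
 * moved; otherwise missing intermediate nodes are added first and the VSI
 * subtree is then moved under the aggregator node.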
*/ -static enum ice_status +static int ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, u8 tc) { struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent; u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; u32 first_node_teid, vsi_teid; - enum ice_status status; u16 num_nodes_added; u8 aggl, vsil, i; + int status; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Is this VSI already part of given aggregator? */ if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node)) @@ -2455,7 +2482,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, &first_node_teid, &num_nodes_added); if (status || num_nodes[i] != num_nodes_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -2467,7 +2494,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, parent = parent->children[0]; if (!parent) - return ICE_ERR_CFG; + return -EIO; } move_nodes: @@ -2486,14 +2513,14 @@ move_nodes: * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The * caller holds the scheduler lock. */ -static enum ice_status +static int ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, u8 tc, bool rm_vsi_info) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_vsi_info *tmp; - enum ice_status status = 0; + int status = 0; list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list, list_entry) { @@ -2550,7 +2577,7 @@ ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node) * This function removes the aggregator node and intermediate nodes if any * from the given TC */ -static enum ice_status +static int ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) { struct ice_sched_node *tc_node, *agg_node; @@ -2558,15 +2585,15 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Can't remove the aggregator node if it has children */ if (ice_sched_is_agg_inuse(pi, agg_node)) - return ICE_ERR_IN_USE; + return -EBUSY; /* need to remove the whole subtree if aggregator node is the * only child. @@ -2575,7 +2602,7 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) struct ice_sched_node *parent = agg_node->parent; if (!parent) - return ICE_ERR_CFG; + return -EIO; if (parent->num_children > 1) break; @@ -2598,11 +2625,11 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) * the aggregator configuration completely for requested TC. The caller needs * to hold the scheduler lock. */ -static enum ice_status +static int ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, u8 tc, bool rm_vsi_info) { - enum ice_status status = 0; + int status = 0; /* If nothing to remove - return success */ if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) @@ -2631,7 +2658,7 @@ exit_rm_agg_cfg_tc: * Save aggregator TC bitmap. This function needs to be called with scheduler * lock held. 
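 * The bitmap is copied into the aggregator's replay_tc_bitmap; -EINVAL is
 * returned if no aggregator with agg_id exists.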
*/ -static enum ice_status +static int ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, unsigned long *tc_bitmap) { @@ -2639,7 +2666,7 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap, ICE_MAX_TRAFFIC_CLASS); return 0; @@ -2654,20 +2681,20 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, * This function creates an aggregator node and intermediate nodes if required * for the given TC */ -static enum ice_status +static int ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) { struct ice_sched_node *parent, *agg_node, *tc_node; u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; u32 first_node_teid; u16 num_nodes_added; + int status = 0; u8 i, aggl; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); /* Does Agg node already exist ? */ @@ -2702,14 +2729,14 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) parent = tc_node; for (i = hw->sw_entry_point_layer; i <= aggl; i++) { if (!parent) - return ICE_ERR_CFG; + return -EIO; status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_nodes_added); if (status || num_nodes[i] != num_nodes_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -2744,13 +2771,13 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) * resources and remove aggregator ID. * This function needs to be called with scheduler lock held. */ -static enum ice_status +static int ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, unsigned long *tc_bitmap) { struct ice_sched_agg_info *agg_info; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; u8 tc; agg_info = ice_get_agg_info(hw, agg_id); @@ -2759,7 +2786,7 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info), GFP_KERNEL); if (!agg_info) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; agg_info->agg_id = agg_id; agg_info->agg_type = agg_type; @@ -2806,12 +2833,12 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, * * This function configures aggregator node(s). */ -enum ice_status +int ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, u8 tc_bitmap) { unsigned long bitmap = tc_bitmap; - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_sched_cfg_agg(pi, agg_id, agg_type, @@ -2877,7 +2904,7 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle) * Save VSI to aggregator TC bitmap. This function needs to call with scheduler * lock held. 
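 * Returns -EINVAL when either the aggregator or the VSI entry in its VSI
 * list cannot be found.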
*/ -static enum ice_status +static int ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, unsigned long *tc_bitmap) { @@ -2886,11 +2913,11 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; /* check if entry already exist */ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); if (!agg_vsi_info) - return ICE_ERR_PARAM; + return -EINVAL; bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap, ICE_MAX_TRAFFIC_CLASS); return 0; @@ -2907,21 +2934,22 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, * already associated to the aggregator node then no operation is performed on * the tree. This function needs to be called with scheduler lock held. */ -static enum ice_status +static int ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, unsigned long *tc_bitmap) { struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL; + struct ice_sched_agg_vsi_info *iter; struct ice_sched_agg_info *agg_info, *old_agg_info; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; agg_info = ice_get_agg_info(hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; /* If the vsi is already part of another aggregator then update * its vsi info list */ @@ -2929,11 +2957,13 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, if (old_agg_info && old_agg_info != agg_info) { struct ice_sched_agg_vsi_info *vtmp; - list_for_each_entry_safe(old_agg_vsi_info, vtmp, + list_for_each_entry_safe(iter, vtmp, &old_agg_info->agg_vsi_list, list_entry) - if (old_agg_vsi_info->vsi_handle == vsi_handle) + if (iter->vsi_handle == vsi_handle) { + old_agg_vsi_info = iter; break; + } } /* check if entry already exist */ @@ -2943,7 +2973,7 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_vsi_info), GFP_KERNEL); if (!agg_vsi_info) - return ICE_ERR_PARAM; + return -EINVAL; /* add VSI ID into the aggregator list */ agg_vsi_info->vsi_handle = vsi_handle; @@ -3004,16 +3034,19 @@ static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw) * returns success or error on config sched element failure. The caller * needs to hold scheduler lock. */ -static enum ice_status +static int ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, struct ice_aqc_txsched_elem_data *info) { struct ice_aqc_txsched_elem_data buf; - enum ice_status status; u16 elem_cfgd = 0; u16 num_elems = 1; + int status; buf = *info; + /* For TC nodes, CIR config is not supported */ + if (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_TC) + buf.data.valid_sections &= ~ICE_AQC_ELEM_VALID_CIR; /* Parent TEID is reserved field in this aq call */ buf.parent_teid = 0; /* Element type is reserved field in this aq call */ @@ -3027,7 +3060,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, &elem_cfgd, NULL); if (status || elem_cfgd != num_elems) { ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n"); - return ICE_ERR_CFG; + return -EIO; } /* Config success case */ @@ -3046,13 +3079,13 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, * * This function configures node element's BW allocation. 
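 * The allocation is written to the element's CIR section for ICE_MIN_BW
 * or the EIR section for ICE_MAX_BW and the element is re-programmed in
 * HW; any other rl_type returns -EINVAL.
 */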
*/ -static enum ice_status +static int ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 bw_alloc) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; - enum ice_status status; + int status; buf = node->info; data = &buf.data; @@ -3063,7 +3096,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc); } else { - return ICE_ERR_PARAM; + return -EINVAL; } /* Configure element */ @@ -3080,12 +3113,12 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, * * Move or associate VSI to a new or default aggregator node. */ -enum ice_status +int ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, u8 tc_bitmap) { unsigned long bitmap = tc_bitmap; - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, @@ -3105,16 +3138,16 @@ ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, * This function removes aggregator reference to VSI and delete aggregator ID * info. It removes the aggregator configuration completely. */ -enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id) +int ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id) { struct ice_sched_agg_info *agg_info; - enum ice_status status = 0; + int status = 0; u8 tc; mutex_lock(&pi->sched_lock); agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit_ice_rm_agg_cfg; } @@ -3125,7 +3158,7 @@ enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id) } if (!bitmap_empty(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) { - status = ICE_ERR_IN_USE; + status = -EBUSY; goto exit_ice_rm_agg_cfg; } @@ -3187,17 +3220,17 @@ ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc) * * Save BW alloc information of VSI type node for post replay use. */ -static enum ice_status +static int ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u16 bw_alloc) { struct ice_vsi_ctx *vsi_ctx; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc], @@ -3208,7 +3241,7 @@ ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, bw_alloc); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } return 0; } @@ -3280,17 +3313,17 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) * * Save BW information of VSI type node for post replay use. 
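 * The value is cached per TC in the VSI context (CIR, EIR or shared BW)
 * so it can be re-applied when the scheduler configuration is replayed.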
*/ -static enum ice_status +static int ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw) { struct ice_vsi_ctx *vsi_ctx; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); @@ -3302,7 +3335,7 @@ ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } return 0; } @@ -3332,19 +3365,19 @@ static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio) * * Save priority information of VSI type node for post replay use. */ -static enum ice_status +static int ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 prio) { struct ice_vsi_ctx *vsi_ctx; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; if (tc >= ICE_MAX_TRAFFIC_CLASS) - return ICE_ERR_PARAM; + return -EINVAL; ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio); return 0; } @@ -3359,7 +3392,7 @@ ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * * Save BW alloc information of AGG type node for post replay use. */ -static enum ice_status +static int ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type, u16 bw_alloc) { @@ -3367,9 +3400,9 @@ ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc, agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) - return ICE_ERR_PARAM; + return -EINVAL; switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc); @@ -3378,7 +3411,7 @@ ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc, ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } return 0; } @@ -3393,7 +3426,7 @@ ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc, * * Save BW information of AGG type node for post replay use. */ -static enum ice_status +static int ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type, u32 bw) { @@ -3401,9 +3434,9 @@ ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) - return ICE_ERR_PARAM; + return -EINVAL; switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw); @@ -3415,7 +3448,7 @@ ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } return 0; } @@ -3431,11 +3464,11 @@ ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, * This function configures BW limit of VSI scheduling node based on TC * information. 
*/ -enum ice_status +int ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status; + int status; status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, ICE_AGG_TYPE_VSI, @@ -3458,11 +3491,11 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * This function configures default BW limit of VSI scheduling node based on TC * information. */ -enum ice_status +int ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type) { - enum ice_status status; + int status; status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, ICE_AGG_TYPE_VSI, @@ -3488,11 +3521,11 @@ ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * This function applies BW limit to aggregator scheduling node based on TC * information. */ -enum ice_status +int ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status; + int status; status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG, tc, rl_type, bw); @@ -3514,11 +3547,11 @@ ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, * This function applies default BW limit to aggregator scheduling node based * on TC information. */ -enum ice_status +int ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type) { - enum ice_status status; + int status; status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG, tc, rl_type, @@ -3543,7 +3576,7 @@ ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic * classes for VSI matching handle. */ -enum ice_status +int ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw, u32 max_bw, u32 shared_bw) { @@ -3559,7 +3592,7 @@ ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw, * This function removes the shared rate limiter(SRL) of all VSI type nodes * across all traffic classes for VSI matching handle. */ -enum ice_status +int ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, @@ -3579,7 +3612,7 @@ ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle) * This function configures the shared rate limiter(SRL) of all aggregator type * nodes across all traffic classes for aggregator matching agg_id. */ -enum ice_status +int ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw, u32 max_bw, u32 shared_bw) { @@ -3595,7 +3628,7 @@ ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw, * This function removes the shared rate limiter(SRL) of all aggregator type * nodes across all traffic classes for aggregator matching agg_id. */ -enum ice_status +int ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id) { return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW, @@ -3615,7 +3648,7 @@ ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id) * This function configures the shared rate limiter(SRL) of all aggregator type * nodes across all traffic classes for aggregator matching agg_id. 
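 *
 * Illustrative call only (IDs and rates are hypothetical; bandwidths are in
 * Kbps, and ICE_SCHED_DFLT_BW selects the default for that limit):
 *
 *   err = ice_cfg_agg_bw_shared_lmt_per_tc(pi, agg_id, 0,
 *                                          ICE_SCHED_DFLT_BW,
 *                                          ICE_SCHED_DFLT_BW, 500000);
 *
 * would leave CIR/EIR at their defaults and apply a ~500 Mbps shared (SRL)
 * cap on TC 0 of the aggregator; ice_cfg_agg_bw_no_shared_lmt_per_tc()
 * removes the SRL again.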
*/ -enum ice_status +int ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) { @@ -3632,7 +3665,7 @@ ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, * This function configures the shared rate limiter(SRL) of all aggregator type * nodes across all traffic classes for aggregator matching agg_id. */ -enum ice_status +int ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc) { return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc, @@ -3651,11 +3684,11 @@ ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc) * This function configures the queue node priority (Sibling Priority) of the * passed in VSI's queue(s) for a given traffic class (TC). */ -enum ice_status +int ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids, u8 *q_prio) { - enum ice_status status = ICE_ERR_PARAM; + int status = -EINVAL; u16 i; mutex_lock(&pi->sched_lock); @@ -3666,7 +3699,7 @@ ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids, node = ice_sched_find_node_by_teid(pi->root, q_ids[i]); if (!node || node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { - status = ICE_ERR_PARAM; + status = -EINVAL; break; } /* Configure Priority */ @@ -3691,17 +3724,17 @@ ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids, * This function configures the node priority (Sibling Priority) of the * passed in VSI's for a given traffic class (TC) of an Aggregator ID. */ -enum ice_status +int ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id, u16 num_vsis, u16 *vsi_handle_arr, u8 *node_prio, u8 tc) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_node *tc_node, *agg_node; - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_agg_info *agg_info; bool agg_id_present = false; struct ice_hw *hw = pi->hw; + int status = -EINVAL; u16 i; mutex_lock(&pi->sched_lock); @@ -3729,7 +3762,7 @@ ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id, bool vsi_handle_valid = false; u16 vsi_handle; - status = ICE_ERR_PARAM; + status = -EINVAL; vsi_handle = vsi_handle_arr[i]; if (!ice_is_vsi_valid(hw, vsi_handle)) goto exit_agg_priority_per_tc; @@ -3737,7 +3770,9 @@ ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id, list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, list_entry) if (agg_vsi_info->vsi_handle == vsi_handle) { +#ifdef __CHECKER__ /* cppcheck-suppress unreadVariable */ +#endif /* __CHECKER__ */ vsi_handle_valid = true; break; } @@ -3778,15 +3813,15 @@ exit_agg_priority_per_tc: * This function configures the BW allocation of the passed in VSI's * node(s) for enabled traffic class. */ -enum ice_status +int ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap, enum ice_rl_type rl_type, u8 *bw_alloc) { - enum ice_status status = 0; + int status = 0; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); @@ -3830,14 +3865,14 @@ ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap, * This function configures the BW allocation of passed in aggregator for * enabled traffic class(s). 
*/ -enum ice_status +int ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap, enum ice_rl_type rl_type, u8 *bw_alloc) { struct ice_sched_agg_info *agg_info; bool agg_id_present = false; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; u8 tc; mutex_lock(&pi->sched_lock); @@ -3847,7 +3882,7 @@ ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap, break; } if (!agg_id_present) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto exit_cfg_agg_bw_alloc; } @@ -3895,8 +3930,8 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) u16 wakeup = 0; /* Get the wakeup integer value */ - bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE); - wakeup_int = div64_long(hw->psm_clk_freq, bytes_per_sec); + bytes_per_sec = div64_s64((s64)bw * 1000, BITS_PER_BYTE); + wakeup_int = div64_s64(hw->psm_clk_freq, bytes_per_sec); if (wakeup_int > 63) { wakeup = (u16)((1 << 15) | wakeup_int); } else { @@ -3904,18 +3939,18 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) * Convert Integer value to a constant multiplier */ wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int; - wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER * hw->psm_clk_freq, - bytes_per_sec); + wakeup_a = div64_s64((s64)ICE_RL_PROF_MULTIPLIER * hw->psm_clk_freq, + bytes_per_sec); /* Get Fraction value */ wakeup_f = wakeup_a - wakeup_b; /* Round up the Fractional value via Ceil(Fractional value) */ - if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2)) + if (wakeup_f > div64_s64(ICE_RL_PROF_MULTIPLIER, 2)) wakeup_f += 1; - wakeup_f_int = (s32) div64_long(wakeup_f * ICE_RL_PROF_FRACTION, - ICE_RL_PROF_MULTIPLIER); + wakeup_f_int = (s32) div64_s64(wakeup_f * ICE_RL_PROF_FRACTION, + ICE_RL_PROF_MULTIPLIER); wakeup |= (u16)(wakeup_int << 9); wakeup |= (u16)(0x1ff & wakeup_f_int); } @@ -3931,12 +3966,12 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) * * This function converts the BW to profile structure format. 
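 *
 * Rough sketch of the conversion (the code below is authoritative): the Kbps
 * value is first turned into bytes per second (bw * 1000 / 8); the loop then
 * walks power-of-two timeslices, ts_rate = psm_clk_freq /
 * (2^i * ICE_RL_PROF_TS_MULTIPLIER), and derives a multiplier,
 * bytes_per_sec * ICE_RL_PROF_MULTIPLIER / ts_rate, until one fits the
 * profile encoding; the chosen encoding is stored in profile->rl_encode.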
*/ -static enum ice_status +static int ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, struct ice_aqc_rl_profile_elem *profile) { - enum ice_status status = ICE_ERR_PARAM; s64 bytes_per_sec, ts_rate, mv_tmp; + int status = -EINVAL; bool found = false; s32 encode = 0; s64 mv = 0; @@ -3947,20 +3982,20 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, return status; /* Bytes per second from Kbps */ - bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE); + bytes_per_sec = div64_s64((s64)bw * 1000, BITS_PER_BYTE); /* encode is 6 bits but really useful are 5 bits */ for (i = 0; i < 64; i++) { u64 pow_result = BIT_ULL(i); - ts_rate = div64_long((s64)hw->psm_clk_freq, - pow_result * ICE_RL_PROF_TS_MULTIPLIER); + ts_rate = div64_s64((s64)hw->psm_clk_freq, + pow_result * ICE_RL_PROF_TS_MULTIPLIER); if (ts_rate <= 0) continue; /* Multiplier value */ - mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER, - ts_rate); + mv_tmp = div64_s64(bytes_per_sec * ICE_RL_PROF_MULTIPLIER, + ts_rate); /* Round to the nearest ICE_RL_PROF_MULTIPLIER */ mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER); @@ -3983,7 +4018,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, profile->rl_encode = cpu_to_le16(encode); status = 0; } else { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; } return status; @@ -4009,10 +4044,10 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type, struct ice_aqc_rl_profile_info *rl_prof_elem; u16 profiles_added = 0, num_profiles = 1; struct ice_aqc_rl_profile_elem *buf; - enum ice_status status; u8 profile_type; + int status; - if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + if (!hw || layer_num >= hw->num_tx_sched_layers) return NULL; switch (rl_type) { case ICE_MIN_BW: @@ -4028,8 +4063,6 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type, return NULL; } - if (!hw) - return NULL; list_for_each_entry(rl_prof_elem, &hw->rl_prof_list[layer_num], list_entry) if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == @@ -4080,7 +4113,7 @@ exit_add_rl_prof: * * This function configures node element's BW limit. */ -static enum ice_status +static int ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 rl_prof_id) { @@ -4104,7 +4137,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, break; default: /* Unknown rate limit type */ - return ICE_ERR_PARAM; + return -EINVAL; } /* Configure element */ @@ -4224,15 +4257,15 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold * scheduler lock. 
*/ -static enum ice_status +static int ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type, u16 profile_id) { struct ice_aqc_rl_profile_info *rl_prof_elem; - enum ice_status status = 0; + int status = 0; - if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) - return ICE_ERR_PARAM; + if (!hw || layer_num >= hw->num_tx_sched_layers) + return -EINVAL; /* Check the existing list for RL profile */ list_for_each_entry(rl_prof_elem, &hw->rl_prof_list[layer_num], list_entry) @@ -4245,11 +4278,11 @@ ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type, /* Remove old profile ID from database */ status = ice_sched_del_rl_profile(hw, rl_prof_elem); - if (status && status != ICE_ERR_IN_USE) + if (status && status != -EBUSY) ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); break; } - if (status == ICE_ERR_IN_USE) + if (status == -EBUSY) status = 0; return status; } @@ -4265,15 +4298,15 @@ ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type, * type CIR, EIR, or SRL to default. This function needs to be called * with the scheduler lock held. */ -static enum ice_status +static int ice_sched_set_node_bw_dflt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u8 layer_num) { - enum ice_status status; struct ice_hw *hw; u8 profile_type; u16 rl_prof_id; + int status; u16 old_id; hw = pi->hw; @@ -4292,7 +4325,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi, rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; break; default: - return ICE_ERR_PARAM; + return -EINVAL; } /* Save existing RL prof ID for later clean up */ old_id = ice_sched_get_node_rl_prof_id(node, rl_type); @@ -4321,14 +4354,14 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi, * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile * ID from local database. The caller needs to hold scheduler lock. */ -static enum ice_status +static int ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw, u8 layer_num) { struct ice_aqc_rl_profile_info *rl_prof_info; - enum ice_status status = ICE_ERR_PARAM; struct ice_hw *hw = pi->hw; u16 old_id, rl_prof_id; + int status = -EINVAL; rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num); if (!rl_prof_info) @@ -4370,7 +4403,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, * NOTE: Caller provides the correct SRL node in case of shared profile * settings. 
*/ -static enum ice_status +static int ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw) { @@ -4378,7 +4411,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, u8 layer_num; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; /* Remove unused RL profile IDs from HW and SW DB */ ice_sched_rm_unused_rl_prof(hw); @@ -4386,14 +4419,13 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, node->tx_sched_layer); if (layer_num >= hw->num_tx_sched_layers) - return ICE_ERR_PARAM; + return -EINVAL; if (bw == ICE_SCHED_DFLT_BW) return ice_sched_set_node_bw_dflt(pi, node, rl_type, layer_num); return ice_sched_set_node_bw(pi, node, rl_type, bw, layer_num); } - /** * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default * @pi: port information structure @@ -4404,7 +4436,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, * type CIR, EIR, or SRL to default. This function needs to be called * with the scheduler lock held. */ -static enum ice_status +static int ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type) @@ -4422,7 +4454,7 @@ ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, * behalf of the requested node (first argument). This function needs to be * called with scheduler lock held. */ -static enum ice_status +static int ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) { /* SRL profiles are not available on all layers. Check if the @@ -4437,7 +4469,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) (node->parent && node->parent->num_children == 1))) return 0; - return ICE_ERR_CFG; + return -EIO; } /** @@ -4448,7 +4480,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) * * Save BW information of queue type node for post replay use. */ -static enum ice_status +static int ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) { switch (rl_type) { @@ -4462,7 +4494,7 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } return 0; } @@ -4478,16 +4510,16 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) * * This function sets BW limit of queue scheduling node. */ -static enum ice_status +static int ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *node; struct ice_q_ctx *q_ctx; + int status = -EINVAL; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle); if (!q_ctx) @@ -4509,7 +4541,7 @@ ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type, node->tx_sched_layer); if (sel_layer >= pi->hw->num_tx_sched_layers) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto exit_q_bw_lmt; } status = ice_sched_validate_srl_node(node, sel_layer); @@ -4541,7 +4573,7 @@ exit_q_bw_lmt: * * This function configures BW limit of queue scheduling node. 
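 *
 * Illustrative call only (handles and rate are hypothetical; bw is in Kbps):
 *
 *   err = ice_cfg_q_bw_lmt(pi, vsi_handle, 0, q_handle, ICE_MAX_BW, 1000000);
 *
 * would cap the queue identified by q_handle on TC 0 at roughly 1 Gbps;
 * ice_cfg_q_bw_dflt_lmt() with the same pi/vsi_handle/tc/q_handle/rl_type
 * removes the limit.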
*/ -enum ice_status +int ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw) { @@ -4559,7 +4591,7 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * * This function configures BW default limit of queue scheduling node. */ -enum ice_status +int ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type) { @@ -4577,12 +4609,12 @@ ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * This function saves the modified values of bandwidth settings for later * replay purpose (restore) after reset. */ -static enum ice_status +static int ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u32 bw) { if (tc >= ICE_MAX_TRAFFIC_CLASS) - return ICE_ERR_PARAM; + return -EINVAL; switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw); @@ -4594,7 +4626,7 @@ ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc, ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } return 0; } @@ -4608,12 +4640,12 @@ ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc, * * This function configures bandwidth limit of TC node. */ -static enum ice_status +static int ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *tc_node; + int status = -EINVAL; if (tc >= ICE_MAX_TRAFFIC_CLASS) return status; @@ -4643,7 +4675,7 @@ exit_set_tc_node_bw: * This function configures BW limit of TC node. * Note: The minimum guaranteed reservation is done via DCBX. */ -enum ice_status +int ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u32 bw) { @@ -4658,7 +4690,7 @@ ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, * * This function configures BW default limit of TC node. */ -enum ice_status +int ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type) { @@ -4674,12 +4706,12 @@ ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc, * * Save BW alloc information of VSI type node for post replay use. */ -static enum ice_status +static int ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u16 bw_alloc) { if (tc >= ICE_MAX_TRAFFIC_CLASS) - return ICE_ERR_PARAM; + return -EINVAL; switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc], @@ -4690,7 +4722,7 @@ ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, bw_alloc); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } return 0; } @@ -4706,12 +4738,12 @@ ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, * changed settings for replay purpose, and return success if it succeeds * in modifying bandwidth alloc setting. */ -static enum ice_status +static int ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u8 bw_alloc) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *tc_node; + int status = -EINVAL; if (tc >= ICE_MAX_TRAFFIC_CLASS) return status; @@ -4740,7 +4772,7 @@ exit_set_tc_node_bw_alloc: * This function configures BW limit of TC node. * Note: The minimum guaranteed reservation is done via DCBX. 
*/ -enum ice_status +int ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u8 bw_alloc) { @@ -4756,18 +4788,18 @@ ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, * and sets node's BW limit to default. This function needs to be * called with the scheduler lock held. */ -enum ice_status +int ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle) { struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = 0; + int status = 0; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; ice_for_each_traffic_class(tc) { struct ice_sched_node *node; @@ -4812,7 +4844,6 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, enum ice_agg_type agg_type, u8 tc) { struct ice_sched_node *node = NULL; - struct ice_sched_node *child_node; switch (agg_type) { case ICE_AGG_TYPE_VSI: { @@ -4840,16 +4871,19 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, case ICE_AGG_TYPE_Q: /* The current implementation allows single queue to modify */ - node = ice_sched_get_node(pi, id); + node = ice_sched_find_node_by_teid(pi->root, id); break; - case ICE_AGG_TYPE_QG: + case ICE_AGG_TYPE_QG: { + struct ice_sched_node *child_node; + /* The current implementation allows single qg to modify */ - child_node = ice_sched_get_node(pi, id); + child_node = ice_sched_find_node_by_teid(pi->root, id); if (!child_node) break; node = child_node->parent; break; + } default: break; @@ -4870,13 +4904,13 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, * This function sets BW limit of VSI or Aggregator scheduling node * based on TC information from passed in argument BW. */ -enum ice_status +int ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, enum ice_agg_type agg_type, u8 tc, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *node; + int status = -EINVAL; if (!pi) return status; @@ -4909,20 +4943,20 @@ exit_set_node_bw_lmt_per_tc: * different than the VSI node layer on all TC(s).This function needs to be * called with scheduler lock held. */ -static enum ice_status +static int ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle) { u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; /* Return success if no nodes are present across TC */ ice_for_each_traffic_class(tc) { struct ice_sched_node *tc_node, *vsi_node; enum ice_rl_type rl_type = ICE_SHARED_BW; - enum ice_status status; + int status; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) @@ -4940,7 +4974,7 @@ ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle) layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, node_layer); if (layer_num >= pi->hw->num_tx_sched_layers) - return ICE_ERR_PARAM; + return -EINVAL; sel_layer = layer_num; } @@ -4964,12 +4998,12 @@ ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle) * class, and saves those value for later use for replaying purposes. The * caller holds the scheduler lock. 
*/ -static enum ice_status +static int ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, struct ice_sched_node *srl_node, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status; + int status; if (bw == ICE_SCHED_DFLT_BW) { status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type); @@ -4996,31 +5030,31 @@ ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle, * is passed, it removes the corresponding bw from the node. The caller * holds scheduler lock. */ -static enum ice_status +static int ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) { struct ice_sched_node *tc_node, *vsi_node, *cfg_node; - enum ice_status status; u8 layer_num; + int status; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) - return ICE_ERR_CFG; + return -EIO; layer_num = ice_sched_get_rl_prof_layer(pi, ICE_SHARED_BW, vsi_node->tx_sched_layer); if (layer_num >= pi->hw->num_tx_sched_layers) - return ICE_ERR_PARAM; + return -EINVAL; /* SRL node may be different */ cfg_node = ice_sched_get_srl_node(vsi_node, layer_num); if (!cfg_node) - return ICE_ERR_CFG; + return -EIO; status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, cfg_node, ICE_MIN_BW, @@ -5050,18 +5084,18 @@ ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle, * classes for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW is * passed, it removes those value(s) from the node. */ -enum ice_status +int ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw, u32 max_bw, u32 shared_bw) { - enum ice_status status = 0; + int status = 0; u8 tc; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); status = ice_sched_validate_vsi_srl_node(pi, vsi_handle); @@ -5100,13 +5134,13 @@ exit_set_vsi_bw_shared_lmt: * different than the AGG node layer on all TC(s).This function needs to be * called with scheduler lock held. */ -static enum ice_status +static int ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id) { u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM; struct ice_sched_agg_info *agg_info; bool agg_id_present = false; - enum ice_status status = 0; + int status = 0; u8 tc; list_for_each_entry(agg_info, &pi->hw->agg_list, list_entry) @@ -5115,7 +5149,7 @@ ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id) break; } if (!agg_id_present) - return ICE_ERR_PARAM; + return -EINVAL; /* Return success if no nodes are present across TC */ ice_for_each_traffic_class(tc) { struct ice_sched_node *tc_node, *agg_node; @@ -5136,7 +5170,7 @@ ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id) layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, node_layer); if (layer_num >= pi->hw->num_tx_sched_layers) - return ICE_ERR_PARAM; + return -EINVAL; sel_layer = layer_num; } @@ -5154,13 +5188,13 @@ ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id) * * This function validates aggregator id. Caller holds the scheduler lock. 
*/ -static enum ice_status +static int ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id) { struct ice_sched_agg_info *agg_info; struct ice_sched_agg_info *tmp; bool agg_id_present = false; - enum ice_status status; + int status; status = ice_sched_validate_agg_srl_node(pi, agg_id); if (status) @@ -5173,7 +5207,7 @@ ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id) } if (!agg_id_present) - return ICE_ERR_PARAM; + return -EINVAL; return 0; } @@ -5191,12 +5225,12 @@ ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id) * requested traffic class, and saves those value for later use for * replaying purposes. The caller holds the scheduler lock. */ -static enum ice_status +static int ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, struct ice_sched_node *srl_node, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status; + int status; if (bw == ICE_SCHED_DFLT_BW) { status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type); @@ -5223,32 +5257,32 @@ ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. Caller * holds the scheduler lock. */ -static enum ice_status +static int ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) { struct ice_sched_node *tc_node, *agg_node, *cfg_node; enum ice_rl_type rl_type = ICE_SHARED_BW; - enum ice_status status = ICE_ERR_CFG; + int status = -EIO; u8 layer_num; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) - return ICE_ERR_CFG; + return -EIO; layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, agg_node->tx_sched_layer); if (layer_num >= pi->hw->num_tx_sched_layers) - return ICE_ERR_PARAM; + return -EINVAL; /* SRL node may be different */ cfg_node = ice_sched_get_srl_node(agg_node, layer_num); if (!cfg_node) - return ICE_ERR_CFG; + return -EIO; status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, ICE_MIN_BW, min_bw); @@ -5278,15 +5312,15 @@ ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id, * BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the * node(s). */ -enum ice_status +int ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw, u32 max_bw, u32 shared_bw) { - enum ice_status status; + int status; u8 tc; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); status = ice_sched_validate_agg_id(pi, agg_id); @@ -5330,15 +5364,15 @@ exit_agg_bw_shared_lmt: * node for a given traffic class for aggregator matching agg_id. When BW * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. */ -enum ice_status +int ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) { - enum ice_status status; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); status = ice_sched_validate_agg_id(pi, agg_id); if (status) @@ -5361,17 +5395,17 @@ exit_agg_bw_shared_lmt_per_tc: * This function configures node element's sibling priority only. This * function needs to be called with scheduler lock held. 
*/ -enum ice_status +int ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi, struct ice_sched_node *node, u8 priority) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; if (!hw) - return ICE_ERR_PARAM; + return -EINVAL; buf = node->info; data = &buf.data; data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; @@ -5394,13 +5428,13 @@ ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi, * burst size value is used for future rate limit calls. It doesn't change the * existing or previously created RL profiles. */ -enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) +int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) { u16 burst_size_to_prog; if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || bytes > ICE_MAX_BURST_SIZE_ALLOWED) - return ICE_ERR_PARAM; + return -EINVAL; if (ice_round_to_num(bytes, 64) <= ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { /* 64 byte granularity case */ @@ -5435,13 +5469,13 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) * This function configures node element's priority value. It * needs to be called with scheduler lock held. */ -static enum ice_status +static int ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, u8 priority) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; - enum ice_status status; + int status; buf = node->info; data = &buf.data; @@ -5462,12 +5496,12 @@ ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, * This function restores node's BW from bw_t_info. The caller needs * to hold the scheduler lock. */ -static enum ice_status +static int ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, struct ice_bw_type_info *bw_t_info) { struct ice_port_info *pi = hw->port_info; - enum ice_status status = ICE_ERR_PARAM; + int status = -EINVAL; u16 bw_alloc; if (!node) @@ -5520,27 +5554,27 @@ ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, * This function re-creates aggregator type nodes. The caller needs to hold * the scheduler lock. */ -static enum ice_status +static int ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info) { struct ice_sched_node *tc_node, *agg_node; - enum ice_status status = 0; + int status = 0; u8 tc; if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; ice_for_each_traffic_class(tc) { if (bitmap_empty(agg_info->bw_t_info[tc].bw_t_bitmap, ICE_BW_TYPE_CNT)) continue; tc_node = ice_sched_get_tc_node(hw->port_info, tc); if (!tc_node) { - status = ICE_ERR_PARAM; + status = -EINVAL; break; } agg_node = ice_sched_get_agg_node(hw->port_info, tc_node, agg_info->agg_id); if (!agg_node) { - status = ICE_ERR_PARAM; + status = -EINVAL; break; } status = ice_sched_replay_node_bw(hw, agg_node, @@ -5594,7 +5628,7 @@ void ice_sched_replay_agg(struct ice_hw *hw) if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) { DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); - enum ice_status status; + int status; bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); ice_sched_get_ena_tc_bitmap(pi, @@ -5651,12 +5685,12 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw) * * Replay root node BW settings. 
*/ -enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi) +int ice_sched_replay_root_node_bw(struct ice_port_info *pi) { - enum ice_status status = 0; + int status = 0; if (!pi->hw) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); status = ice_sched_replay_node_bw(pi->hw, pi->root, @@ -5671,13 +5705,13 @@ enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi) * * This function replay TC nodes. */ -enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi) +int ice_sched_replay_tc_node_bw(struct ice_port_info *pi) { - enum ice_status status = 0; + int status = 0; u8 tc; if (!pi->hw) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); ice_for_each_traffic_class(tc) { struct ice_sched_node *tc_node; @@ -5703,7 +5737,7 @@ enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi) * This function replays VSI type nodes bandwidth. This function needs to be * called with scheduler lock held. */ -static enum ice_status +static int ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle, unsigned long *tc_bitmap) { @@ -5711,12 +5745,12 @@ ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle, struct ice_port_info *pi = hw->port_info; struct ice_bw_type_info *bw_t_info; struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = 0; + int status = 0; u8 tc; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; ice_for_each_traffic_class(tc) { if (!ice_is_tc_ena(*tc_bitmap, tc)) continue; @@ -5743,18 +5777,18 @@ ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle, * their node bandwidth information. This function needs to be called with * scheduler lock held. */ -static enum ice_status +static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) { DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_port_info *pi = hw->port_info; struct ice_sched_agg_info *agg_info; - enum ice_status status; + int status; bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; agg_info = ice_get_vsi_agg_info(hw, vsi_handle); if (!agg_info) return 0; /* Not present in list - default Agg case */ @@ -5794,10 +5828,10 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) * This function replays association of VSI to aggregator type nodes, and * node bandwidth information. */ -enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) { struct ice_port_info *pi = hw->port_info; - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_sched_replay_vsi_agg(hw, vsi_handle); @@ -5813,7 +5847,7 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) * This function replays queue type node bandwidth. This function needs to be * called with scheduler lock held. 
*/ -enum ice_status +int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) { struct ice_sched_node *q_node; @@ -5821,6 +5855,6 @@ ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) /* Following also checks the presence of node in tree */ q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); if (!q_node) - return ICE_ERR_PARAM; + return -EINVAL; return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); } diff --git a/drivers/thirdparty/ice/ice_sched.h b/drivers/thirdparty/ice/ice_sched.h index 48642a54f8a9..971118b24830 100644 --- a/drivers/thirdparty/ice/ice_sched.h +++ b/drivers/thirdparty/ice/ice_sched.h @@ -6,6 +6,9 @@ #include "ice_common.h" +#define ICE_SCHED_5_LAYERS 5 +#define ICE_SCHED_9_LAYERS 9 + #define ICE_QGRP_LAYER_OFFSET 2 #define ICE_VSI_LAYER_OFFSET 4 #define ICE_AGG_LAYER_OFFSET 6 @@ -34,7 +37,6 @@ #define ICE_PSM_CLK_446MHZ_IN_HZ 446428571 #define ICE_PSM_CLK_390MHZ_IN_HZ 390625000 - struct rl_profile_params { u32 bw; /* in Kbps */ u16 rl_multiplier; @@ -73,24 +75,32 @@ struct ice_sched_agg_info { }; /* FW AQ command calls */ -enum ice_status +int ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, struct ice_aqc_get_topo_elem *buf, u16 buf_size, u8 *num_branches, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, struct ice_sq_cd *cd); -enum ice_status +int +ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes, + struct ice_aqc_node_attr_elem *buf, u16 buf_size, + struct ice_sq_cd *cd); +int ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes, struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size, struct ice_sq_cd *cd); -enum ice_status +int +ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, + struct ice_aqc_move_elem *buf, u16 buf_size, + u16 *grps_movd, struct ice_sq_cd *cd); +int ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd); -enum ice_status ice_sched_init_port(struct ice_port_info *pi); -enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); +int ice_sched_init_port(struct ice_port_info *pi); +int ice_sched_query_res_alloc(struct ice_hw *hw); void ice_sched_get_psm_clk_freq(struct ice_hw *hw); /* Functions to cleanup scheduler SW DB */ @@ -103,7 +113,7 @@ struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid); struct ice_sched_node * ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid); /* Add a scheduling node into SW DB for given info */ -enum ice_status +int ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info); void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node); @@ -111,114 +121,113 @@ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc); struct ice_sched_node * ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 owner); -enum ice_status +int ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable); -enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle); -enum ice_status -ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle); +int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle); +int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle); struct ice_sched_node * ice_sched_get_vsi_node(struct ice_port_info *pi, struct 
ice_sched_node *tc_node, u16 vsi_handle); bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node); -enum ice_status +int ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, struct ice_sq_cd *cd); /* Tx scheduler rate limiter functions */ -enum ice_status +int ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, u8 tc_bitmap); -enum ice_status +int ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, u8 tc_bitmap); -enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id); -enum ice_status +int ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id); +int ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw); -enum ice_status +int ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type); -enum ice_status +int ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u32 bw); -enum ice_status +int ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type); -enum ice_status +int ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw); -enum ice_status +int ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type); -enum ice_status +int ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type, u32 bw); -enum ice_status +int ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type); -enum ice_status +int ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw, u32 max_bw, u32 shared_bw); -enum ice_status +int ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle); -enum ice_status +int ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw, u32 max_bw, u32 shared_bw); -enum ice_status +int ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id); -enum ice_status +int ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw); -enum ice_status +int ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc); -enum ice_status +int ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids, u8 *q_prio); -enum ice_status +int ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap, enum ice_rl_type rl_type, u8 *bw_alloc); -enum ice_status +int ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id, u16 num_vsis, u16 *vsi_handle_arr, u8 *node_prio, u8 tc); -enum ice_status +int ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap, enum ice_rl_type rl_type, u8 *bw_alloc); bool ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, struct ice_sched_node *node); -enum ice_status +int ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle); -enum ice_status +int ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, enum ice_agg_type agg_type, u8 tc, enum ice_rl_type rl_type, u32 bw); -enum ice_status +int ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw, u32 max_bw, u32 shared_bw); -enum ice_status +int ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw, u32 max_bw, u32 shared_bw); -enum 
ice_status +int ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw); -enum ice_status +int ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi, struct ice_sched_node *node, u8 priority); -enum ice_status +int ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u8 bw_alloc); -enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); +int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw); void ice_sched_replay_agg(struct ice_hw *hw); -enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi); -enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle); -enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi); -enum ice_status +int ice_sched_replay_tc_node_bw(struct ice_port_info *pi); +int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle); +int ice_sched_replay_root_node_bw(struct ice_port_info *pi); +int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); #endif /* _ICE_SCHED_H_ */ diff --git a/drivers/thirdparty/ice/ice_sched_cfg.h b/drivers/thirdparty/ice/ice_sched_cfg.h new file mode 100644 index 000000000000..fdcf1b2d9286 --- /dev/null +++ b/drivers/thirdparty/ice/ice_sched_cfg.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_SCHED_CFG_H_ +#define _ICE_SCHED_CFG_H_ + +int ice_sched_cfg_set_bw_lmt(struct ice_pf *pf, + ice_cfg_set_bw_lmt_data *cfg_data); +int ice_sched_cfg_rm_bw_lmt(struct ice_pf *pf, + ice_cfg_rm_bw_lmt_data *cfg_data); +int ice_sched_cfg_bw_alloc(struct ice_pf *pf, + ice_cfg_bw_alloc_data *cfg_data); + +int ice_sched_cfg_vf_set_bw_lmt(struct ice_pf *pf, + ice_cfg_vf_set_bw_lmt_data *cfg_data); +int ice_sched_cfg_vf_rm_bw_lmt(struct ice_pf *pf, + ice_cfg_vf_rm_bw_lmt_data *cfg_data); +int ice_sched_cfg_vf_bw_alloc(struct ice_pf *pf, + ice_cfg_vf_bw_alloc_data *cfg_data); + +int ice_sched_cfg_q_set_bw_lmt(struct ice_pf *pf, + ice_cfg_q_set_bw_lmt_data *cfg_data); +int ice_sched_cfg_q_rm_bw_lmt(struct ice_pf *pf, + ice_cfg_q_rm_bw_lmt_data *cfg_data); + +#endif /* _ICE_SCHED_CFG_H_ */ diff --git a/drivers/thirdparty/ice/ice_siov.c b/drivers/thirdparty/ice/ice_siov.c new file mode 100644 index 000000000000..6db7e2d3deb9 --- /dev/null +++ b/drivers/thirdparty/ice/ice_siov.c @@ -0,0 +1,1236 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#include "ice.h" +#include "ice_lib.h" +#include "ice_virtchnl_allowlist.h" +#include "ice_fltr.h" +#include "siov_regs.h" +#include "ice_irq.h" +#include "ice_vf_lib_private.h" + +struct ice_mbx { + u32 mbx_asqh; + u32 mbx_asqt; + u32 mbx_asqbal; + u32 mbx_asqbah; + u32 mbx_arqh; + u32 mbx_arqt; + u32 mbx_arqbal; + u32 mbx_arqbah; +}; + +struct ice_adi_priv { + struct ice_adi adi; + struct ice_vf vf; + u32 pasid; + void *token; + struct work_struct update_hash_entry; + enum virtchnl_vfr_states reset_state; + struct ice_mbx ice_adi_mbx; +}; + +static inline struct ice_adi_priv *adi_priv(struct ice_adi *adi) +{ + return (struct ice_adi_priv *) + container_of(adi, struct ice_adi_priv, adi); +} + +static inline struct ice_adi_priv *vf_to_adi_priv(struct ice_vf *vf) +{ + return (struct ice_adi_priv *) + container_of(vf, struct ice_adi_priv, vf); +} + +struct ice_adi_sparse_mmap_info { + u64 start; + u64 end; + u64 cnt; + u64 phy_addr; +}; + +enum ice_adi_sparse_mmap_type { + ICE_ADI_SPARSE_MBX = 0, + ICE_ADI_SPARSE_RXQ, + ICE_ADI_SPARSE_TXQ, + ICE_ADI_SPARSE_DYN_CTL01, + ICE_ADI_SPARSE_DYN_CTL, + ICE_ADI_SPARSE_MAX, +}; + +/** + * ice_adi_close - close ADI + * @adi: ADI pointer + * + * Return 0 for success, negative for failure + */ +static int ice_adi_close(struct ice_adi *adi) +{ + struct ice_adi_priv *priv = adi_priv(adi); + struct ice_vf *vf = &priv->vf; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "Invalid VSI pointer"); + return -EFAULT; + } + + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); + ice_vsi_stop_all_rx_rings(vsi); + + ice_set_vf_state_qs_dis(vf); + + return 0; +} + +/** + * ice_adi_reset - reset ADI + * @adi: ADI pointer + * + * Return 0 for success, negative for failure + */ +static int ice_adi_reset(struct ice_adi *adi) +{ + struct ice_adi_priv *priv; + struct ice_vf *vf; + + priv = adi_priv(adi); + vf = &priv->vf; + + return ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK); +} + +/** + * ice_vsi_configure_pasid - config pasid for VSI + * @vf: VF pointer + * @pasid: pasid value + * @ena: enable + * + * Return 0 for success, negative for failure + */ +static int ice_vsi_configure_pasid(struct ice_vf *vf, u32 pasid, bool ena) +{ + struct ice_adi_priv *priv = vf_to_adi_priv(vf); + struct ice_vsi_ctx *ctxt; + struct ice_vsi *vsi; + struct device *dev; + struct ice_hw *hw; + int status; + + hw = &vf->pf->hw; + dev = ice_pf_to_dev(vf->pf); + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; + + ctxt->info.valid_sections = + cpu_to_le16(ICE_AQ_VSI_PROP_PASID_VALID); + pasid &= ICE_AQ_VSI_PASID_ID_M; + if (ena) + pasid |= ICE_AQ_VSI_PASID_ID_VALID; + else + pasid &= ~ICE_AQ_VSI_PASID_ID_VALID; + ctxt->info.pasid_id = cpu_to_le32(pasid); + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (status) { + dev_err(dev, "Failed to update pasid id in VSI context, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); + } else { + vsi->info.pasid_id = pasid; + priv->pasid = pasid; + } + + kfree(ctxt); + return status; +} + +/** + * ice_adi_cfg_pasid - config pasid for ADI + * @adi: ADI pointer + * @pasid: pasid value + * @ena: enable + * + * Return 0 for success, negative for failure + */ +static int ice_adi_cfg_pasid(struct ice_adi *adi, u32 pasid, bool ena) +{ + struct ice_adi_priv *priv; + struct ice_vf *vf; + int ret; + + priv = adi_priv(adi); + vf = &priv->vf; + ret = 
ice_vsi_configure_pasid(vf, pasid, ena); + return ret; +} + +/** + * ice_dis_siov_vf_mapping - disable SIOV VF MSIX mapping + * @vf: pointer to the VF structure + * + * Return 0 for success, negative for failure + */ +static int ice_dis_siov_vf_mapping(struct ice_vf *vf) +{ + struct ice_hw *hw = &vf->pf->hw; + struct ice_vsi *vsi; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + + wr32(hw, VPINT_MBX_CTL(vsi->vsi_num), 0); + + return 0; +} + +/** + * ice_free_adi - Free a ADI + * @priv: pointer to ADI private structure + */ +static void ice_free_adi(struct ice_adi_priv *priv) +{ + struct ice_vf *vf = &priv->vf; + struct ice_vfs *vfs; + + WARN_ON(!hash_hashed(&vf->entry)); + + vfs = &vf->pf->vfs; + + /* Remove the VF from the hash table, and then release its main + * reference with ice_put_vf(). Once the last reference is dropped it + * will be freed via ice_siov_free_vf. + */ + mutex_lock(&vfs->table_lock); + hash_del_rcu(&vf->entry); + mutex_unlock(&vfs->table_lock); + + cancel_work_sync(&priv->update_hash_entry); + ice_put_vf(vf); +} + +/** + * ice_adi_vsi_setup - Set up a VSI for the ADI + * @vf: pointer to VF structure + * + * Returns pointer to the successfully allocated VSI struct on success, + * otherwise returns NULL on failure. + */ +static struct ice_vsi *ice_adi_vsi_setup(struct ice_vf *vf) +{ + struct ice_port_info *pi = vf->pf->hw.port_info; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + struct device *dev; + int err; + + dev = ice_pf_to_dev(pf); + vsi = ice_vsi_setup(pf, pi, ICE_VSI_ADI, vf, NULL, 0); + if (!vsi) { + dev_err(dev, "ADI VSI setup failed\n"); + ice_vf_invalidate_vsi(vf); + return NULL; + } + vf->lan_vsi_idx = vsi->idx; + vf->lan_vsi_num = vsi->vsi_num; + vf->vf_id = vsi->vsi_num; + + err = ice_vf_init_host_cfg(vf, vsi); + if (err) { + dev_err(dev, "Failed to initialize host configuration\n"); + goto release_vsi; + } + + return vsi; + +release_vsi: + ice_vsi_release(vsi); + ice_vf_invalidate_vsi(vf); + return NULL; +} + +/** + * ice_siov_free_vf - Free VF memory after all references are dropped + * @vf: the VF to free + * + * Called by ice_put_vf through ice_release_vf when the last VF reference is + * dropped. Do not call this or the .free function directly. Instead, use + * ice_put_vf to ensure that the memory is only released once all references + * are finished. + */ +static void ice_siov_free_vf(struct ice_vf *vf) +{ + struct ice_adi_priv *priv = vf_to_adi_priv(vf); + struct ice_vsi *vsi; + + /* ice_free_adi() takes care of removing the VF from the hash table */ + ice_dis_siov_vf_mapping(vf); + vsi = ice_get_vf_vsi(vf); + if (vsi) + ice_vsi_release(vsi); + mutex_destroy(&vf->cfg_lock); + kfree_rcu(priv, vf.rcu); +} + +/** + * ice_siov_clear_reset_state - clears S-IOV VF Reset status indication + * @vf: the vf to configure + */ +static void ice_siov_clear_reset_state(struct ice_vf *vf) +{ + struct ice_adi_priv *priv = vf_to_adi_priv(vf); + + /* Clear the reset status so that VF does not get a mistaken + * indication of an active VF when reading VFGEN_RSTAT. + */ + priv->reset_state = VIRTCHNL_VFR_INPROGRESS; +} + +/** + * ice_siov_clear_mbx_register - clears S-IOV VF's mailbox registers + * @vf: the vf to configure + */ +static void ice_siov_clear_mbx_register(struct ice_vf *vf) +{ + struct ice_adi_priv *priv = vf_to_adi_priv(vf); + struct ice_hw *hw = &vf->pf->hw; + + /* Save mailbox registers. MBX_ARQLEN and MBX_ATQLEN won't + * be saved and restored because AVF driver will check + * ARQLEN to determine whether reset has been triggered. 
+ */ + priv->ice_adi_mbx.mbx_asqh = rd32(hw, VSI_MBX_ATQH(vf->vf_id)); + priv->ice_adi_mbx.mbx_asqt = rd32(hw, VSI_MBX_ATQT(vf->vf_id)); + priv->ice_adi_mbx.mbx_asqbal = rd32(hw, VSI_MBX_ATQBAL(vf->vf_id)); + priv->ice_adi_mbx.mbx_asqbah = rd32(hw, VSI_MBX_ATQBAH(vf->vf_id)); + priv->ice_adi_mbx.mbx_arqh = rd32(hw, VSI_MBX_ARQH(vf->vf_id)); + priv->ice_adi_mbx.mbx_arqt = rd32(hw, VSI_MBX_ARQT(vf->vf_id)); + priv->ice_adi_mbx.mbx_arqbal = rd32(hw, VSI_MBX_ARQBAL(vf->vf_id)); + priv->ice_adi_mbx.mbx_arqbah = rd32(hw, VSI_MBX_ARQBAH(vf->vf_id)); + + wr32(hw, VSI_MBX_ARQLEN(vf->vf_id), 0); + wr32(hw, VSI_MBX_ATQLEN(vf->vf_id), 0); + + wr32(hw, VSI_MBX_ATQH(vf->vf_id), 0); + wr32(hw, VSI_MBX_ATQT(vf->vf_id), 0); + wr32(hw, VSI_MBX_ATQBAL(vf->vf_id), 0); + wr32(hw, VSI_MBX_ATQBAH(vf->vf_id), 0); + wr32(hw, VSI_MBX_ARQH(vf->vf_id), 0); + wr32(hw, VSI_MBX_ARQT(vf->vf_id), 0); + wr32(hw, VSI_MBX_ARQBAL(vf->vf_id), 0); + wr32(hw, VSI_MBX_ARQBAH(vf->vf_id), 0); +} + +/** + * ice_siov_trigger_reset_register - trigger VF reset for S-IOV VF + * @vf: pointer to VF structure + * @is_vflr: true if reset occurred due to VFLR + * + * Trigger and cleanup a reset for a Scalable IOV VF. + */ +static void ice_siov_trigger_reset_register(struct ice_vf *vf, bool is_vflr) +{ + struct ice_adi_priv *priv = vf_to_adi_priv(vf); + struct ice_pf *pf = vf->pf; + struct ice_hw *hw; + u32 reg; + int i; + + hw = &pf->hw; + + /* VF hardware reset is about to start, so we need to clear the + * VFR_VFACTIVE state now. + */ + priv->reset_state = VIRTCHNL_VFR_INPROGRESS; + + /* In the case of VFLR, HW has already reset the VF and we just need + * to cleanup. Otherwise we need to trigger the reset using the + * VSIGEN_RTRIG register. + */ + if (!is_vflr) { + reg = rd32(hw, VSIGEN_RTRIG(vf->vf_id)); + reg |= VSIGEN_RTRIG_VMSWR_M; + wr32(hw, VSIGEN_RTRIG(vf->vf_id), reg); + ice_flush(hw); + } + + wr32(hw, PFPCI_VMINDEX, vf->vf_id); + for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) { + reg = rd32(hw, PFPCI_VMPEND); + /* no transactions pending so stop polling */ + if ((reg & VF_TRANS_PENDING_M) == 0) + break; + + dev_err(ice_pf_to_dev(pf), "VM %u PCI transactions stuck\n", + vf->vf_id); + udelay(ICE_PCI_CIAD_WAIT_DELAY_US); + } +} + +/** + * ice_siov_poll_reset_status - poll S-IOV VF reset status + * @vf: pointer to VF structure + * + * Returns true when reset is successful, else returns false + */ +static bool ice_siov_poll_reset_status(struct ice_vf *vf) +{ + struct ice_adi_priv *priv = vf_to_adi_priv(vf); + struct ice_hw *hw = &vf->pf->hw; + unsigned int i; + u32 reg; + + for (i = 0; i < 10; i++) { + /* VF reset requires driver to first reset the VF and then + * poll the status register to make sure that the reset + * completed successfully. + */ + reg = rd32(hw, VSIGEN_RSTAT(vf->vf_id)); + if (reg & VSIGEN_RSTAT_VMRD_M) { + priv->reset_state = VIRTCHNL_VFR_COMPLETED; + return true; + } + + /* only sleep if the reset is not done */ + usleep_range(10, 20); + } + return false; +} + +/** + * ice_siov_irq_close - Close any IRQ data prior to resetting the VF + * @vf: the VF to process + * + * Called by generic virtualization code during reset to close any previous + * IRQ configuration before rebuilding a new VSI. 
+ */ +static void ice_siov_irq_close(struct ice_vf *vf) +{ + struct ice_adi_priv *priv = vf_to_adi_priv(vf); + + /* Release the previous VSI IRQ context */ + ice_vdcm_pre_rebuild_irqctx(priv->token); +} + +/** + * ice_siov_create_vsi - Create a new S-IOV VSI after a reset + * @vf: pointer to VF structure + * + * Called by ice_vf_recreate_vsi to create a new VSI after the old VSI has + * been removed. + * + * Returns 0 on success, else returns a negative value; + */ +static int ice_siov_create_vsi(struct ice_vf *vf) +{ + struct ice_vsi *vsi; + + /* Make sure the old mapping is disabled */ + ice_dis_siov_vf_mapping(vf); + + vsi = ice_adi_vsi_setup(vf); + if (!vsi) + return -ENOMEM; + + return 0; +} + +/** + * ice_ena_siov_vf_mapping - enable SIOV VF MSIX mapping + * @vf: pointer to the VF structure + * + * Returns 0 on success, else returns a negative value; + */ +static int ice_ena_siov_vf_mapping(struct ice_vf *vf) +{ + struct ice_hw *hw = &vf->pf->hw; + struct ice_q_vector *q_vector; + struct ice_vsi *vsi; + u32 reg; + + vsi = ice_get_vf_vsi(vf); + if (!vsi || !vsi->q_vectors) + return -EINVAL; + + q_vector = vsi->q_vectors[0]; + if (!q_vector) + return -EINVAL; + + reg = ((q_vector->reg_idx << VPINT_MBX_CTL_MSIX_INDX_S) & + VPINT_MBX_CTL_MSIX_INDX_M) | VPINT_MBX_CTL_CAUSE_ENA_M; + wr32(hw, VPINT_MBX_CTL(vsi->vsi_num), reg); + + return 0; +} + +/** + * ice_siov_post_vsi_rebuild - post S-IOV VSI rebuild operations + * @vf: pointer to VF structure + * + * After a VSI is re-created or rebuilt, perform the necessary operations to + * complete the VSI rebuild. This function is called after an individual VF + * reset or after a global PF reset. + */ +static void ice_siov_post_vsi_rebuild(struct ice_vf *vf) +{ + struct ice_adi_priv *priv = vf_to_adi_priv(vf); + bool update_hash_entry; + struct ice_vsi *vsi; + struct device *dev; + struct ice_hw *hw; + int err; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return; + + dev = ice_pf_to_dev(vf->pf); + hw = &vf->pf->hw; + + /* If the VSI number has changed after the rebuild, we need to update + * the VF ID and move the entry in the hash table + */ + if (vsi->vsi_num != vf->vf_id) { + vf->vf_id = vsi->vsi_num; + update_hash_entry = true; + } else { + update_hash_entry = false; + } + + err = ice_vdcm_rebuild_irqctx(priv->token); + if (err) + dev_err(dev, "failed to rebuild irq context, error %d\n", err); + + /* Make sure to zap all the pages only after the new VSI is setup. + * When ice_siov_vsi_rebuild is called by VF_RESET virtchnl, this + * function is scheduled in a kernel thread. At the same time, VM + * will keep accessing old VSI's mbx register set. + * + * If we zapped the pages before the new VSI was setup, the VF might + * read the mailbox register while we're still setting up the new VSI. + * This would trigger a page fault that generates a new GPA to HPA + * mapping, but with the old VSI registers. + * + * By zapping the pages only after the new VSI is setup, we avoid + * this possibility. + */ + ice_vdcm_zap(priv->token); + + err = ice_vsi_configure_pasid(vf, priv->pasid, true); + if (err) + dev_err(dev, "failed to reconfigure PASID for VF %u, error %d\n", + vf->vf_id, err); + + if (ice_ena_siov_vf_mapping(vf)) + dev_err(dev, "Failed to map SIOV VF\n"); + + /* If the VSI number has changed after the rebuild, we need to update + * the hash table. This can't be done immediately in this thread + * because we might be iterating the hash table in this thread, and we + * can't take the table lock without causing a deadlock here. 
Schedule + * a thread to update the hash table. + * + * If we don't need to update the hash entry, it's safe to let the VF + * driver activate. Otherwise, delay this until we finish updating the + * hash entry. + */ + if (update_hash_entry) + queue_work(ice_wq, &priv->update_hash_entry); + else + priv->reset_state = VIRTCHNL_VFR_VFACTIVE; + + /* Restore mailbox values. Don't restore MBX_ARQLEN and + * MBX_ATQLEN as explained in ice_siov_clear_mbx_register. + */ + wr32(hw, VSI_MBX_ATQH(vf->vf_id), priv->ice_adi_mbx.mbx_asqh); + wr32(hw, VSI_MBX_ATQT(vf->vf_id), priv->ice_adi_mbx.mbx_asqt); + wr32(hw, VSI_MBX_ATQBAL(vf->vf_id), priv->ice_adi_mbx.mbx_asqbal); + wr32(hw, VSI_MBX_ATQBAH(vf->vf_id), priv->ice_adi_mbx.mbx_asqbah); + wr32(hw, VSI_MBX_ARQH(vf->vf_id), priv->ice_adi_mbx.mbx_arqh); + wr32(hw, VSI_MBX_ARQT(vf->vf_id), priv->ice_adi_mbx.mbx_arqt); + wr32(hw, VSI_MBX_ARQBAL(vf->vf_id), priv->ice_adi_mbx.mbx_arqbal); + wr32(hw, VSI_MBX_ARQBAH(vf->vf_id), priv->ice_adi_mbx.mbx_arqbah); +} + +/** + * ice_siov_clear_reset_trigger - enable VF to access hardware + * @vf: VF to enable hardware access for + */ +static void ice_siov_clear_reset_trigger(struct ice_vf *vf) +{ + struct ice_hw *hw = &vf->pf->hw; + u32 reg; + + reg = rd32(hw, VSIGEN_RTRIG(vf->vf_id)); + reg &= ~VSIGEN_RTRIG_VMSWR_M; + wr32(hw, VSIGEN_RTRIG(vf->vf_id), reg); + ice_flush(hw); +} + +static struct ice_q_vector *ice_siov_get_q_vector(struct ice_vf *vf, + struct ice_vsi *vsi, + u16 vector_id) +{ + if (!vsi || !vsi->q_vectors) + return NULL; + + /* don't subtract OICR vector since SIOV VF stores the corresponding + * vector_id in the vsi's q_vector array. + */ + return vsi->q_vectors[vector_id]; +} + +static const struct ice_vf_ops ice_siov_vf_ops = { + .reset_type = ICE_VM_RESET, + .free = ice_siov_free_vf, + .clear_reset_state = ice_siov_clear_reset_state, + .clear_mbx_register = ice_siov_clear_mbx_register, + .trigger_reset_register = ice_siov_trigger_reset_register, + .poll_reset_status = ice_siov_poll_reset_status, + .clear_reset_trigger = ice_siov_clear_reset_trigger, + .irq_close = ice_siov_irq_close, + .create_vsi = ice_siov_create_vsi, + .post_vsi_rebuild = ice_siov_post_vsi_rebuild, + .get_q_vector = ice_siov_get_q_vector, +}; + +/** + * ice_siov_update_hash_entry - work task to fix VF hash entry + * @work: the work task structure + * + * Work item scheduled to fix the VF hash entry after a rebuild. Called when + * the VSI number, and thus the VF ID, has changed. This update cannot be done + * in the same thread because it cannot guarantee a safe method of acquiring + * the table lock mutex, and because the calling thread might be iterating the + * hash table using the standard iterator which is not protected against hash + * table modification. + */ +static void ice_siov_update_hash_entry(struct work_struct *work) +{ + struct ice_adi_priv *priv = container_of(work, struct ice_adi_priv, + update_hash_entry); + struct ice_vf *vf = &priv->vf; + struct ice_vfs *vfs; + + vfs = &vf->pf->vfs; + + mutex_lock(&vfs->table_lock); + mutex_lock(&vf->cfg_lock); + + hash_del_rcu(&vf->entry); + hash_add_rcu(vfs->table, &vf->entry, vf->vf_id); + + /* We've finished cleaning up in software. Update the reset + * state, allowing the VF to detect that it's safe to proceed.
+ */ + priv->reset_state = VIRTCHNL_VFR_VFACTIVE; + + mutex_unlock(&vf->cfg_lock); + mutex_unlock(&vfs->table_lock); +} + +/** + * ice_create_adi - Set up the VF structure and create VSI + * @pf: pointer to PF structure + * + * Returns pointer to the successfully allocated ADI private struct on success, + * otherwise returns NULL on failure. + */ +static struct ice_adi_priv *ice_create_adi(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_vfs *vfs = &pf->vfs; + struct ice_adi_priv *priv; + struct ice_vsi *vsi; + struct ice_vf *vf; + + /* Disable global interrupts */ + wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_idx), + ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); + set_bit(ICE_OICR_INTR_DIS, pf->state); + ice_flush(&pf->hw); + if (ice_get_avail_txq_count(pf) < ICE_DFLT_QS_PER_SIOV_VF || + ice_get_avail_rxq_count(pf) < ICE_DFLT_QS_PER_SIOV_VF) + return NULL; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + vf = &priv->vf; + kref_init(&vf->refcnt); + vf->pf = pf; + + /* set S-IOV specific vf ops for VFs created during S-IOV flow */ + vf->vf_ops = &ice_siov_vf_ops; + if (ice_initialize_vf_entry(vf)) { + dev_err(dev, "Failed to initialize the VF entry for SIOV VF\n"); + goto init_vf_err; + } + INIT_WORK(&priv->update_hash_entry, ice_siov_update_hash_entry); + + vf->vf_sw_id = pf->first_sw; + vsi = ice_adi_vsi_setup(vf); + if (!vsi) { + dev_err(dev, "Failed to initialize VSI resources for SIOV VF\n"); + goto init_vf_err; + } + + if (ice_ena_siov_vf_mapping(vf)) { + dev_err(dev, "Failed to map SIOV VF\n"); + goto vf_mapping_err; + } + + mutex_init(&vf->cfg_lock); + + set_bit(ICE_VF_STATE_INIT, vf->vf_states); + wr32(&pf->hw, VSIGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); + ice_flush(&pf->hw); + clear_bit(ICE_VF_DIS, pf->state); + + /* re-enable interrupts */ + if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) + ice_irq_dynamic_ena(&pf->hw, NULL, NULL); + + mutex_lock(&pf->vfs.table_lock); + hash_add_rcu(vfs->table, &vf->entry, vf->vf_id); + mutex_unlock(&pf->vfs.table_lock); + + return priv; + +vf_mapping_err: + ice_vsi_release(vsi); + ice_vf_invalidate_vsi(vf); +init_vf_err: + kfree_rcu(priv, vf.rcu); + return NULL; +} + +/** + * ice_adi_get_vector_num - get number of vectors assigned to this ADI + * @adi: ADI pointer + * + * Return 0 or positive for success, negative for failure + */ +static int ice_adi_get_vector_num(struct ice_adi *adi) +{ + struct ice_adi_priv *priv = adi_priv(adi); + struct ice_vf *vf = &priv->vf; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "Invalid VSI pointer"); + return -EFAULT; + } + + return vsi->num_q_vectors; +} + +/** + * ice_adi_get_vector_irq - get OS IRQ number per vector + * @adi: ADI pointer + * @vector: IRQ vector index + * + * Return 0 or positive for success, negative for failure + */ +static int ice_adi_get_vector_irq(struct ice_adi *adi, u32 vector) +{ + struct ice_adi_priv *priv = adi_priv(adi); + struct ice_vf *vf = &priv->vf; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "Invalid VSI pointer"); + return -EFAULT; + } + + if (vector >= vsi->num_q_vectors) + return -EINVAL; + + return ice_get_irq_num(pf, vsi->base_vector + vector); +} + +/** + * ice_adi_read_reg32 - read ADI register + * @adi: ADI pointer + * @offs: register offset + * + * Return register value at the offset + */ +static u32 ice_adi_read_reg32(struct ice_adi *adi, size_t offs) +{ + struct 
ice_adi_priv *priv = adi_priv(adi); + struct ice_vf *vf = &priv->vf; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + u32 index, reg_val; + struct ice_hw *hw; + + if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) { + if (offs == VFGEN_RSTAT1) + return VIRTCHNL_VFR_INPROGRESS; + else + return 0xdeadbeef; + } + + hw = &pf->hw; + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "Invalid VSI pointer"); + return 0xdeadbeef; + } + + /* check for 4-byte aligned register access */ + if (!IS_ALIGNED(offs, 4)) + return 0xdeadbeef; + + switch (offs) { + case VFGEN_RSTAT1: + reg_val = rd32(hw, VSIGEN_RSTAT(vsi->vsi_num)); + + if (reg_val & VSIGEN_RSTAT_VMRD_M) { + if (priv->reset_state == VIRTCHNL_VFR_VFACTIVE) + return VIRTCHNL_VFR_VFACTIVE; + else + return VIRTCHNL_VFR_COMPLETED; + } + + return VIRTCHNL_VFR_INPROGRESS; + case VF_MBX_ATQBAL1: + return rd32(hw, VSI_MBX_ATQBAL(vsi->vsi_num)); + case VF_MBX_ATQBAH1: + return rd32(hw, VSI_MBX_ATQBAH(vsi->vsi_num)); + case VF_MBX_ATQLEN1: + return rd32(hw, VSI_MBX_ATQLEN(vsi->vsi_num)); + case VF_MBX_ATQH1: + return rd32(hw, VSI_MBX_ATQH(vsi->vsi_num)); + case VF_MBX_ATQT1: + return rd32(hw, VSI_MBX_ATQT(vsi->vsi_num)); + case VF_MBX_ARQBAL1: + return rd32(hw, VSI_MBX_ARQBAL(vsi->vsi_num)); + case VF_MBX_ARQBAH1: + return rd32(hw, VSI_MBX_ARQBAH(vsi->vsi_num)); + case VF_MBX_ARQLEN1: + return rd32(hw, VSI_MBX_ARQLEN(vsi->vsi_num)); + case VF_MBX_ARQH1: + return rd32(hw, VSI_MBX_ARQH(vsi->vsi_num)); + case VF_MBX_ARQT1: + return rd32(hw, VSI_MBX_ARQT(vsi->vsi_num)); + case VFINT_DYN_CTL0: + if (WARN_ON_ONCE(!vsi->q_vectors || !vsi->q_vectors[0])) + return 0xdeadbeef; + return rd32(hw, GLINT_DYN_CTL(vsi->q_vectors[0]->reg_idx)); + case VFINT_ITR0(0): + case VFINT_ITR0(1): + case VFINT_ITR0(2): + if (WARN_ON_ONCE(!vsi->q_vectors || !vsi->q_vectors[0])) + return 0xdeadbeef; + index = (offs - VFINT_ITR0(0)) / 4; + return rd32(hw, GLINT_ITR(index, vsi->q_vectors[0]->reg_idx)); + case VFINT_DYN_CTLN(0) ... 
VFINT_DYN_CTLN(63): + /* vsi's vector 0 reserved for OICR, + * data Q vectors start from index 1 + */ + index = (offs - VFINT_DYN_CTLN(0)) / 4 + 1; + if (index >= vsi->num_q_vectors || !vsi->q_vectors[index]) { + dev_warn_once(ice_pf_to_dev(pf), "Invalid vector pointer for VSI %d\n", + vsi->vsi_num); + return 0xdeadbeef; + } + return rd32(hw, GLINT_DYN_CTL(vsi->q_vectors[index]->reg_idx)); + default: + return 0xdeadbeef; + } +} + +/** + * ice_adi_write_reg32 - write ADI register + * @adi: ADI pointer + * @offs: register offset + * @data: register value + */ +static void ice_adi_write_reg32(struct ice_adi *adi, size_t offs, u32 data) +{ + struct ice_adi_priv *priv = adi_priv(adi); + struct ice_vf *vf = &priv->vf; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + struct ice_hw *hw; + u32 index; + + if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) + return; + + hw = &pf->hw; + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "Invalid VSI pointer"); + return; + } + + /* check for 4-byte aligned register access */ + if (!IS_ALIGNED(offs, 4)) + return; + + switch (offs) { + case VF_MBX_ATQBAL1: + wr32(hw, VSI_MBX_ATQBAL(vsi->vsi_num), data); + break; + case VF_MBX_ATQBAH1: + wr32(hw, VSI_MBX_ATQBAH(vsi->vsi_num), data); + break; + case VF_MBX_ATQLEN1: + wr32(hw, VSI_MBX_ATQLEN(vsi->vsi_num), data); + break; + case VF_MBX_ATQH1: + wr32(hw, VSI_MBX_ATQH(vsi->vsi_num), data); + break; + case VF_MBX_ATQT1: + wr32(hw, VSI_MBX_ATQT(vsi->vsi_num), data); + break; + case VF_MBX_ARQBAL1: + wr32(hw, VSI_MBX_ARQBAL(vsi->vsi_num), data); + break; + case VF_MBX_ARQBAH1: + wr32(hw, VSI_MBX_ARQBAH(vsi->vsi_num), data); + break; + case VF_MBX_ARQLEN1: + wr32(hw, VSI_MBX_ARQLEN(vsi->vsi_num), data); + break; + case VF_MBX_ARQH1: + wr32(hw, VSI_MBX_ARQH(vsi->vsi_num), data); + break; + case VF_MBX_ARQT1: + wr32(hw, VSI_MBX_ARQT(vsi->vsi_num), data); + break; + case VFINT_DYN_CTL0: + if (WARN_ON_ONCE(!vsi->q_vectors || !vsi->q_vectors[0])) + break; + wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[0]->reg_idx), data); + break; + case VFINT_ITR0(0): + case VFINT_ITR0(1): + case VFINT_ITR0(2): + if (WARN_ON_ONCE(!vsi->q_vectors || !vsi->q_vectors[0])) + break; + index = (offs - VFINT_ITR0(0)) / 4; + wr32(hw, GLINT_ITR(index, vsi->q_vectors[0]->reg_idx), data); + break; + case VFINT_DYN_CTLN(0) ... VFINT_DYN_CTLN(63): + /* vsi's vector 0 reserved for OICR, + * data Q vectors start from index 1 + */ + index = (offs - VFINT_DYN_CTLN(0)) / 4 + 1; + if (index >= vsi->num_q_vectors || !vsi->q_vectors[index]) + goto err_resource; + wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[index]->reg_idx), data); + break; + case QTX_TAIL(0) ... QTX_TAIL(255): + index = (offs - QTX_TAIL(0)) / 4; + if (!vsi->txq_map || index >= vsi->alloc_txq) + goto err_resource; + wr32(hw, QTX_COMM_DBELL_PAGE(vsi->txq_map[index]), data); + break; + case QRX_TAIL1(0) ... QRX_TAIL1(255): + index = (offs - QRX_TAIL1(0)) / 4; + if (!vsi->rxq_map || index >= vsi->alloc_rxq) + goto err_resource; + wr32(hw, QRX_TAIL_PAGE(vsi->rxq_map[index]), data); + break; + default: + break; + } + return; + +err_resource: + dev_warn_once(ice_pf_to_dev(pf), "Invalid resource access for VF VSI %d\n", + vsi->vsi_num); +} + +/** + * ice_adi_get_sparse_mmap_hpa - get VDEV HPA + * @adi: pointer to assignable device interface + * @index: VFIO BAR index + * @vm_pgoff: page offset of virtual memory area + * @addr: VDEV address + * + * Return 0 if success, negative for failure. 
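A worked example of the offset decode in ice_adi_read_reg32()/ice_adi_write_reg32() above, assuming the 4-byte register stride implied by the divide-by-4: a guest access to VFINT_DYN_CTLN(2) sits 8 bytes past VFINT_DYN_CTLN(0), so index = 8 / 4 + 1 = 3 and the access is forwarded to GLINT_DYN_CTL(vsi->q_vectors[3]->reg_idx); q_vector 0 is the OICR/mailbox vector and is reached through VFINT_DYN_CTL0 instead. Likewise a doorbell write to QTX_TAIL(5) decodes to index 5 and lands on QTX_COMM_DBELL_PAGE(vsi->txq_map[5]), provided the index stays below vsi->alloc_txq.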
+ */ +static int +ice_adi_get_sparse_mmap_hpa(struct ice_adi *adi, u32 index, + u64 vm_pgoff, u64 *addr) +{ + struct ice_adi_priv *priv; + struct pci_dev *pdev; + struct ice_vsi *vsi; + struct ice_vf *vf; + u64 reg_off; + int q_idx; + + if (!addr || index != VFIO_PCI_BAR0_REGION_INDEX) + return -EINVAL; + + priv = adi_priv(adi); + vf = &priv->vf; + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + + pdev = vf->pf->pdev; + switch (vm_pgoff) { + case PHYS_PFN(VDEV_MBX_START): + /* MBX Registers */ + reg_off = VSI_MBX_ATQBAL(vsi->vsi_num); + break; + case PHYS_PFN(VDEV_QRX_TAIL_START) ... + (PHYS_PFN(VDEV_QRX_BUFQ_TAIL_START) - 1): + /* RXQ tail register */ + q_idx = vm_pgoff - PHYS_PFN(VDEV_QRX_TAIL_START); + if (q_idx >= vsi->alloc_rxq) + return -EINVAL; + reg_off = QRX_TAIL_PAGE(vsi->rxq_map[q_idx]); + break; + case PHYS_PFN(VDEV_QTX_TAIL_START) ... + (PHYS_PFN(VDEV_QTX_COMPL_TAIL_START) - 1): + /* TXQ tail register */ + q_idx = vm_pgoff - PHYS_PFN(VDEV_QTX_TAIL_START); + if (q_idx >= vsi->alloc_txq) + return -EINVAL; + reg_off = QTX_COMM_DBELL_PAGE(vsi->txq_map[q_idx]); + break; + case PHYS_PFN(VDEV_INT_DYN_CTL01): + /* INT DYN CTL01, ITR0/1/2 */ + if (!vsi->num_q_vectors) + return -EINVAL; + reg_off = PF0INT_DYN_CTL(vsi->q_vectors[0]->reg_idx); + break; + case PHYS_PFN(VDEV_INT_DYN_CTL(0)) ... + (PHYS_PFN(ICE_VDCM_BAR0_SIZE) - 1): + /* INT DYN CTL, ITR0/1/2 + * the first several vectors in q_vectors[] is for mailbox, + * mailbox vector's number is defined with ICE_NONQ_VECS_VF + */ + q_idx = vm_pgoff - PHYS_PFN(VDEV_INT_DYN_CTL(0)) + + ICE_NONQ_VECS_VF; + if (q_idx >= vsi->num_q_vectors) + return -EINVAL; + reg_off = PF0INT_DYN_CTL(vsi->q_vectors[q_idx]->reg_idx); + break; + default: + return -EFAULT; + } + + /* add BAR0 start address */ + *addr = pci_resource_start(pdev, 0) + reg_off; + return 0; +} + +/** + * ice_adi_get_sparse_mmap_num - get number of sparse memory + * @adi: pointer to assignable device interface + * + * Return number of sparse memory areas. + */ +static int +ice_adi_get_sparse_mmap_num(struct ice_adi *adi) +{ + struct ice_adi_priv *priv; + struct ice_vsi *vsi; + struct ice_vf *vf; + + priv = adi_priv(adi); + vf = &priv->vf; + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + + /** + * Besides tx/rx queue registers, INT DYN CTL, ITR0/1/2 registers, + * we also need to reserve for MBX registers, which is defined + * with ICE_NONQ_VECS_VF + */ + return vsi->alloc_txq + vsi->alloc_rxq + + vsi->num_q_vectors + ICE_NONQ_VECS_VF; +} + +/** + * ice_adi_get_sparse_mmap_area - get sparse memory layout for mmap + * @adi: pointer to assignable device interface + * @index: index of sparse memory + * @offset: pointer to sparse memory areas offset + * @size: pointer to sparse memory areas size + * + * Return 0 if success, negative for failure. 
+ */ +static int +ice_adi_get_sparse_mmap_area(struct ice_adi *adi, int index, + u64 *offset, u64 *size) +{ + struct ice_adi_sparse_mmap_info pattern[ICE_ADI_SPARSE_MAX]; + struct ice_adi_priv *priv; + struct ice_vsi *vsi; + struct ice_vf *vf; + int nr_areas = 0; + u64 ai; + int i; + + memset(pattern, 0, sizeof(pattern[0]) * ICE_ADI_SPARSE_MAX); + + priv = adi_priv(adi); + vf = &priv->vf; + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + + nr_areas = ice_adi_get_sparse_mmap_num(adi); + + if (index < 0 || (index > (nr_areas - 1))) + return -EINVAL; + + ai = (u64)index; + + i = ICE_ADI_SPARSE_MBX; + pattern[i].start = 0; + pattern[i].cnt = 1; + pattern[i].end = pattern[i].start + pattern[i].cnt; + pattern[i].phy_addr = VDEV_MBX_START; + + i = ICE_ADI_SPARSE_RXQ; + pattern[i].start = pattern[i - 1].end; + pattern[i].cnt = vsi->alloc_rxq; + pattern[i].end = pattern[i].start + pattern[i].cnt; + pattern[i].phy_addr = VDEV_QRX_TAIL_START; + + i = ICE_ADI_SPARSE_TXQ; + pattern[i].start = pattern[i - 1].end; + pattern[i].cnt = vsi->alloc_txq; + pattern[i].end = pattern[i].start + pattern[i].cnt; + pattern[i].phy_addr = VDEV_QTX_TAIL_START; + + i = ICE_ADI_SPARSE_DYN_CTL01; + pattern[i].start = pattern[i - 1].end; + if (vsi->num_q_vectors > 0) + pattern[i].cnt = 1; + pattern[i].end = pattern[i].start + pattern[i].cnt; + pattern[i].phy_addr = VDEV_INT_DYN_CTL01; + + i = ICE_ADI_SPARSE_DYN_CTL; + pattern[i].start = pattern[i - 1].end; + /* the first q_vector is for mailbox, which has been allocated */ + if (vsi->num_q_vectors > 1) + pattern[i].cnt = vsi->num_q_vectors - 1; + pattern[i].end = pattern[i].start + pattern[i].cnt; + pattern[i].phy_addr = VDEV_INT_DYN_CTL(0); + + for (i = 0; i < ICE_ADI_SPARSE_MAX; i++) { + if (ai >= pattern[i].start && ai < pattern[i].end) { + *offset = pattern[i].phy_addr + + PAGE_SIZE * (ai - pattern[i].start); + *size = PAGE_SIZE; + break; + } + } + + return (i == ICE_ADI_SPARSE_MAX) ? -EINVAL : 0; +} + +/** + * ice_vdcm_alloc_adi - alloc one ADI + * @dev: linux device associated with ADI + * @token: pointer to VDCM + * + * Return Non zero pointer for success, NULL for failure + */ +struct ice_adi *ice_vdcm_alloc_adi(struct device *dev, void *token) +{ + struct ice_adi_priv *priv; + struct ice_adi *adi; + struct ice_pf *pf; + + pf = pci_get_drvdata(to_pci_dev(dev)); + + priv = ice_create_adi(pf); + if (!priv) + return NULL; + + adi = &priv->adi; + priv->token = token; + adi->cfg_pasid = ice_adi_cfg_pasid; + adi->close = ice_adi_close; + adi->reset = ice_adi_reset; + adi->get_vector_num = ice_adi_get_vector_num; + adi->get_vector_irq = ice_adi_get_vector_irq; + adi->read_reg32 = ice_adi_read_reg32; + adi->write_reg32 = ice_adi_write_reg32; + adi->get_sparse_mmap_hpa = ice_adi_get_sparse_mmap_hpa; + adi->get_sparse_mmap_num = ice_adi_get_sparse_mmap_num; + adi->get_sparse_mmap_area = ice_adi_get_sparse_mmap_area; + + return adi; +} + +/** + * ice_vdcm_free_adi - free ADI + * @adi: ADI pointer + */ +void ice_vdcm_free_adi(struct ice_adi *adi) +{ + struct ice_adi_priv *priv = adi_priv(adi); + + ice_free_adi(priv); +} + +/** + * ice_restore_pasid_config - restore PASID mbx support + * @pf: PF pointer structure + * @reset_type: type of reset + * + * On CORER/GLOBER, the global PASID mbx support bit gets + * cleared. For successful restoration of Scalable IOV VFs + * on these kind of resets, we need to reenable PASID mbx + * support. 
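To make the sparse layout produced by ice_adi_get_sparse_mmap_num()/ice_adi_get_sparse_mmap_area() above concrete, take a hypothetical VF VSI with alloc_rxq = 4, alloc_txq = 4 and num_q_vectors = 5 (one mailbox/OICR vector plus four data vectors), and assume ICE_NONQ_VECS_VF is 1. The number of page-sized areas is then 4 + 4 + 5 + 1 = 14, laid out as: index 0 -> VDEV_MBX_START; index 1..4 -> VDEV_QRX_TAIL_START + (index - 1) * PAGE_SIZE; index 5..8 -> VDEV_QTX_TAIL_START + (index - 5) * PAGE_SIZE; index 9 -> VDEV_INT_DYN_CTL01; index 10..13 -> VDEV_INT_DYN_CTL(0) + (index - 10) * PAGE_SIZE. Each area reports *size = PAGE_SIZE, matching the per-page HPA translation done in ice_adi_get_sparse_mmap_hpa().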
+ */ +void ice_restore_pasid_config(struct ice_pf *pf, enum ice_reset_req reset_type) +{ + if (reset_type == ICE_RESET_CORER || reset_type == ICE_RESET_GLOBR) + wr32(&pf->hw, GL_MBX_PASID, GL_MBX_PASID_PASID_MODE_M); +} + +/** + * ice_initialize_siov_res - initialize SIOV related resources + * @pf: PF pointer structure + */ +void ice_initialize_siov_res(struct ice_pf *pf) +{ + int err; + + err = ice_vdcm_init(pf->pdev); + if (err) { + dev_err(ice_pf_to_dev(pf), "Error enabling Scalable IOV\n"); + return; + } + /* enable PASID mailbox support */ + wr32(&pf->hw, GL_MBX_PASID, GL_MBX_PASID_PASID_MODE_M); + set_bit(ICE_FLAG_SIOV_ENA, pf->flags); + + /* set default SIOV VF resources */ + pf->vfs.num_msix_per = ICE_NUM_VF_MSIX_SMALL; + pf->vfs.num_qps_per = min_t(int, pf->vfs.num_msix_per, + ICE_DFLT_QS_PER_SIOV_VF); + + /* ensure mutual exclusivity of SRIOV and SIOV */ + clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); + dev_info(ice_pf_to_dev(pf), "Scalable IOV has been enabled, disabling SRIOV\n"); + + ice_dcf_init_sw_rule_mgmt(pf); +} diff --git a/drivers/thirdparty/ice/ice_siov.h b/drivers/thirdparty/ice/ice_siov.h new file mode 100644 index 000000000000..51904792016b --- /dev/null +++ b/drivers/thirdparty/ice/ice_siov.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_SIOV_H_ +#define _ICE_SIOV_H_ + +#define ICE_DFLT_QS_PER_SIOV_VF 4 + +#if IS_ENABLED(CONFIG_VFIO_MDEV) && defined(HAVE_PASID_SUPPORT) +void ice_initialize_siov_res(struct ice_pf *pf); +void ice_restore_pasid_config(struct ice_pf *pf, enum ice_reset_req reset_type); +#else +static inline void ice_initialize_siov_res(struct ice_pf *pf) { } +static inline void ice_restore_pasid_config(struct ice_pf *pf, + enum ice_reset_req reset_type) { } +#endif /* CONFIG_VFIO_MDEV && HAVE_PASID_SUPPORT */ + +#endif /* _ICE_SIOV_H_ */ diff --git a/drivers/thirdparty/ice/ice_sriov.c b/drivers/thirdparty/ice/ice_sriov.c index a66f896b5a28..047378d4e249 100644 --- a/drivers/thirdparty/ice/ice_sriov.c +++ b/drivers/thirdparty/ice/ice_sriov.c @@ -1,535 +1,2247 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2018-2021, Intel Corporation. */ -#include "ice_common.h" -#include "ice_sriov.h" +#include "ice.h" +#include "ice_vf_lib_private.h" +#include "ice_base.h" +#include "ice_lib.h" +#include "ice_fltr.h" +#include "ice_dcb_lib.h" +#include "ice_eswitch.h" +#include "ice_virtchnl_allowlist.h" +#include "ice_flex_pipe.h" +#include "ice_vf_adq.h" +#include "ice_tc_lib.h" /** - * ice_aq_send_msg_to_vf - * @hw: pointer to the hardware structure - * @vfid: VF ID to send msg - * @v_opcode: opcodes for VF-PF communication - * @v_retval: return error code - * @msg: pointer to the msg buffer - * @msglen: msg length - * @cd: pointer to command details + * ice_free_vf_entries - Free all VF entries from the hash table + * @pf: pointer to the PF structure * - * Send message to VF driver (0x0802) using mailbox - * queue and asynchronously sending message via - * ice_sq_send_cmd() function + * Iterate over the VF hash table, removing and releasing all VF entries. + * Called during VF teardown or as cleanup during failed VF initialization. 
*/ -enum ice_status -ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, - u8 *msg, u16 msglen, struct ice_sq_cd *cd) +static void ice_free_vf_entries(struct ice_pf *pf) { - struct ice_aqc_pf_vf_msg *cmd; - struct ice_aq_desc desc; + struct ice_vfs *vfs = &pf->vfs; + struct hlist_node *tmp; + struct ice_vf *vf; + unsigned int bkt; - ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf); - - cmd = &desc.params.virt; - cmd->id = cpu_to_le32(vfid); - - desc.cookie_high = cpu_to_le32(v_opcode); - desc.cookie_low = cpu_to_le32(v_retval); - - if (msglen) - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - - return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd); -} - - -/** - * ice_conv_link_speed_to_virtchnl - * @adv_link_support: determines the format of the returned link speed - * @link_speed: variable containing the link_speed to be converted - * - * Convert link speed supported by HW to link speed supported by virtchnl. - * If adv_link_support is true, then return link speed in Mbps. Else return - * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller - * needs to cast back to an enum virtchnl_link_speed in the case where - * adv_link_support is false, but when adv_link_support is true the caller can - * expect the speed in Mbps. - */ -u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) -{ - u32 speed; - - if (adv_link_support) - switch (link_speed) { - case ICE_AQ_LINK_SPEED_10MB: - speed = ICE_LINK_SPEED_10MBPS; - break; - case ICE_AQ_LINK_SPEED_100MB: - speed = ICE_LINK_SPEED_100MBPS; - break; - case ICE_AQ_LINK_SPEED_1000MB: - speed = ICE_LINK_SPEED_1000MBPS; - break; - case ICE_AQ_LINK_SPEED_2500MB: - speed = ICE_LINK_SPEED_2500MBPS; - break; - case ICE_AQ_LINK_SPEED_5GB: - speed = ICE_LINK_SPEED_5000MBPS; - break; - case ICE_AQ_LINK_SPEED_10GB: - speed = ICE_LINK_SPEED_10000MBPS; - break; - case ICE_AQ_LINK_SPEED_20GB: - speed = ICE_LINK_SPEED_20000MBPS; - break; - case ICE_AQ_LINK_SPEED_25GB: - speed = ICE_LINK_SPEED_25000MBPS; - break; - case ICE_AQ_LINK_SPEED_40GB: - speed = ICE_LINK_SPEED_40000MBPS; - break; - case ICE_AQ_LINK_SPEED_50GB: - speed = ICE_LINK_SPEED_50000MBPS; - break; - case ICE_AQ_LINK_SPEED_100GB: - speed = ICE_LINK_SPEED_100000MBPS; - break; - default: - speed = ICE_LINK_SPEED_UNKNOWN; - break; - } - else - /* Virtchnl speeds are not defined for every speed supported in - * the hardware. To maintain compatibility with older AVF - * drivers, while reporting the speed the new speed values are - * resolved to the closest known virtchnl speeds - */ - switch (link_speed) { - case ICE_AQ_LINK_SPEED_10MB: - case ICE_AQ_LINK_SPEED_100MB: - speed = (u32)VIRTCHNL_LINK_SPEED_100MB; - break; - case ICE_AQ_LINK_SPEED_1000MB: - case ICE_AQ_LINK_SPEED_2500MB: - case ICE_AQ_LINK_SPEED_5GB: - speed = (u32)VIRTCHNL_LINK_SPEED_1GB; - break; - case ICE_AQ_LINK_SPEED_10GB: - speed = (u32)VIRTCHNL_LINK_SPEED_10GB; - break; - case ICE_AQ_LINK_SPEED_20GB: - speed = (u32)VIRTCHNL_LINK_SPEED_20GB; - break; - case ICE_AQ_LINK_SPEED_25GB: - speed = (u32)VIRTCHNL_LINK_SPEED_25GB; - break; - case ICE_AQ_LINK_SPEED_40GB: - case ICE_AQ_LINK_SPEED_50GB: - case ICE_AQ_LINK_SPEED_100GB: - speed = (u32)VIRTCHNL_LINK_SPEED_40GB; - break; - default: - speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN; - break; - } - - return speed; -} - -/* The mailbox overflow detection algorithm helps to check if there - * is a possibility of a malicious VF transmitting too many MBX messages to the - * PF. - * 1. 
The mailbox snapshot structure, ice_mbx_snapshot, is initialized during - * driver initialization in ice_init_hw() using ice_mbx_init_snapshot(). - * The struct ice_mbx_snapshot helps to track and traverse a static window of - * messages within the mailbox queue while looking for a malicious VF. - * - * 2. When the caller starts processing its mailbox queue in response to an - * interrupt, the structure ice_mbx_snapshot is expected to be cleared before - * the algorithm can be run for the first time for that interrupt. This can be - * done via ice_mbx_reset_snapshot(). - * - * 3. For every message read by the caller from the MBX Queue, the caller must - * call the detection algorithm's entry function ice_mbx_vf_state_handler(). - * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is - * filled as it is required to be passed to the algorithm. - * - * 4. Every time a message is read from the MBX queue, a VFId is received which - * is passed to the state handler. The boolean output is_malvf of the state - * handler ice_mbx_vf_state_handler() serves as an indicator to the caller - * whether this VF is malicious or not. - * - * 5. When a VF is identified to be malicious, the caller can send a message - * to the system administrator. The caller can invoke ice_mbx_report_malvf() - * to help determine if a malicious VF is to be reported or not. This function - * requires the caller to maintain a global bitmap to track all malicious VFs - * and pass that to ice_mbx_report_malvf() along with the VFID which was identified - * to be malicious by ice_mbx_vf_state_handler(). - * - * 6. The global bitmap maintained by PF can be cleared completely if PF is in - * reset or the bit corresponding to a VF can be cleared if that VF is in reset. - * When a VF is shut down and brought back up, we assume that the new VF - * brought up is not malicious and hence report it if found malicious. - * - * 7. The function ice_mbx_reset_snapshot() is called to reset the information - * in ice_mbx_snapshot for every new mailbox interrupt handled. - * - * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated - * when driver is unloaded. - */ -#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M) -/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that - * the max messages check must be ignored in the algorithm - */ -#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF - -/** - * ice_mbx_traverse - Pass through mailbox snapshot - * @hw: pointer to the HW struct - * @new_state: new algorithm state - * - * Traversing the mailbox static snapshot without checking - * for malicious VFs. - */ -static void -ice_mbx_traverse(struct ice_hw *hw, - enum ice_mbx_snapshot_state *new_state) -{ - struct ice_mbx_snap_buffer_data *snap_buf; - u32 num_iterations; - - snap_buf = &hw->mbx_snapshot.mbx_buf; - - /* As mailbox buffer is circular, applying a mask - * on the incremented iteration count. + /* Remove all VFs from the hash table and release their main + * reference. Once all references to the VF are dropped, ice_put_vf() + * will call ice_sriov_free_vf which will remove the VF memory. */ - num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations); + lockdep_assert_held(&vfs->table_lock); - /* Checking either of the below conditions to exit snapshot traversal: - * Condition-1: If the number of iterations in the mailbox is equal to - * the mailbox head which would indicate that we have reached the end - * of the static snapshot. 
- * Condition-2: If the maximum messages serviced in the mailbox for a - * given interrupt is the highest possible value then there is no need - * to check if the number of messages processed is equal to it. If not - * check if the number of messages processed is greater than or equal - * to the maximum number of mailbox entries serviced in current work item. + hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) { + hash_del_rcu(&vf->entry); + ice_put_vf(vf); + } +} + +/** + * ice_free_vf_res - Free a VF's resources + * @vf: pointer to the VF info + */ +static void ice_free_vf_res(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + int i, last_vector_idx; + + /* First, disable VF's configuration API to prevent OS from + * accessing the VF's VSI after it's freed or invalidated. */ - if (num_iterations == snap_buf->head || - (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT && - ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx)) - *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + clear_bit(ICE_VF_STATE_INIT, vf->vf_states); + ice_vf_fdir_exit(vf); + /* free VF control VSI */ + if (vf->ctrl_vsi_idx != ICE_NO_VSI) + ice_vf_ctrl_vsi_release(vf); + + ice_vf_fsub_exit(vf); + + /* free VSI and disconnect it from the parent uplink */ + if (vf->lan_vsi_idx != ICE_NO_VSI) { + ice_vf_vsi_release(vf); + vf->num_mac = 0; + } + + last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1; + + /* clear VF MDD event information */ + memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); + memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); + + ice_vf_adq_release(vf); + + /* Disable interrupts so that VF starts in a known state */ + for (i = vf->first_vector_idx; i <= last_vector_idx; i++) { + wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M); + ice_flush(&pf->hw); + } + /* reset some of the state variables keeping track of the resources */ + clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); + clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); } /** - * ice_mbx_detect_malvf - Detect malicious VF in snapshot - * @hw: pointer to the HW struct - * @vf_id: relative virtual function ID - * @new_state: new algorithm state - * @is_malvf: boolean output to indicate if VF is malicious - * - * This function tracks the number of asynchronous messages - * sent per VF and marks the VF as malicious if it exceeds - * the permissible number of messages to send. + * ice_dis_vf_mappings + * @vf: pointer to the VF structure */ -static enum ice_status -ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id, - enum ice_mbx_snapshot_state *new_state, - bool *is_malvf) +static void ice_dis_vf_mappings(struct ice_vf *vf) { - struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + struct device *dev; + int first, last, v; + struct ice_hw *hw; - if (vf_id >= snap->mbx_vf.vfcntr_len) - return ICE_ERR_OUT_OF_RANGE; - - /* increment the message count in the VF array */ - snap->mbx_vf.vf_cntr[vf_id]++; - - if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD) - *is_malvf = true; - - /* continue to iterate through the mailbox snapshot */ - ice_mbx_traverse(hw, new_state); - - return 0; -} - -/** - * ice_mbx_reset_snapshot - Reset mailbox snapshot structure - * @snap: pointer to mailbox snapshot structure in the ice_hw struct - * - * Reset the mailbox snapshot structure and clear VF counter array. 
- */ -static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap) -{ - u32 vfcntr_len; - - if (!snap || !snap->mbx_vf.vf_cntr) + hw = &pf->hw; + vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) return; - /* Clear VF counters. */ - vfcntr_len = snap->mbx_vf.vfcntr_len; - if (vfcntr_len) - memset(snap->mbx_vf.vf_cntr, 0, - (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr))); + dev = ice_pf_to_dev(pf); + wr32(hw, VPINT_ALLOC(vf->vf_id), 0); + wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); - /* Reset mailbox snapshot for a new capture. */ - memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf)); - snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + first = vf->first_vector_idx; + last = first + pf->vfs.num_msix_per - 1; + for (v = first; v <= last; v++) { + u32 reg; + + reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) & + GLINT_VECT2FUNC_IS_PF_M) | + ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & + GLINT_VECT2FUNC_PF_NUM_M)); + wr32(hw, GLINT_VECT2FUNC(v), reg); + } + + if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) + wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0); + else + dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); + + if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) + wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0); + else + dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); } /** - * ice_mbx_vf_state_handler - Handle states of the overflow algorithm - * @hw: pointer to the HW struct - * @mbx_data: pointer to structure containing mailbox data - * @vf_id: relative virtual function (VF) ID - * @is_malvf: boolean output to indicate if VF is malicious + * ice_sriov_free_msix_res - Reset/free any used MSIX resources + * @pf: pointer to the PF structure * - * The function serves as an entry point for the malicious VF - * detection algorithm by handling the different states and state - * transitions of the algorithm: - * New snapshot: This state is entered when creating a new static - * snapshot. The data from any previous mailbox snapshot is - * cleared and a new capture of the mailbox head and tail is - * logged. This will be the new static snapshot to detect - * asynchronous messages sent by VFs. On capturing the snapshot - * and depending on whether the number of pending messages in that - * snapshot exceed the watermark value, the state machine enters - * traverse or detect states. - * Traverse: If pending message count is below watermark then iterate - * through the snapshot without any action on VF. - * Detect: If pending message count exceeds watermark traverse - * the static snapshot and look for a malicious VF. + * Since no MSIX entries are taken from the pf->irq_tracker then just clear + * the pf->sriov_base_vector. + * + * Returns 0 on success, and -EINVAL on error. */ -enum ice_status -ice_mbx_vf_state_handler(struct ice_hw *hw, - struct ice_mbx_data *mbx_data, u16 vf_id, - bool *is_malvf) +static int ice_sriov_free_msix_res(struct ice_pf *pf) { - struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; - struct ice_mbx_snap_buffer_data *snap_buf; - struct ice_ctl_q_info *cq = &hw->mailboxq; - enum ice_mbx_snapshot_state new_state; - enum ice_status status = 0; + struct ice_res_tracker *res; - if (!is_malvf || !mbx_data) - return ICE_ERR_BAD_PTR; + if (!pf) + return -EINVAL; - /* When entering the mailbox state machine assume that the VF - * is not malicious until detected. 
+ res = pf->irq_tracker; + if (!res) + return -EINVAL; + + /* give back irq_tracker resources used */ + WARN_ON(pf->sriov_base_vector < res->num_entries); + + pf->sriov_base_vector = 0; + + return 0; +} + +/** + * ice_free_vfs - Free all VFs + * @pf: pointer to the PF structure + */ +void ice_free_vfs(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_vfs *vfs = &pf->vfs; + struct ice_hw *hw = &pf->hw; + struct ice_vf *vf; + unsigned int bkt; + + if (!ice_has_vfs(pf)) + return; + + while (test_and_set_bit(ICE_VF_DIS, pf->state)) + usleep_range(1000, 2000); + + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. */ - *is_malvf = false; + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - /* Checking if max messages allowed to be processed while servicing current - * interrupt is not less than the defined AVF message threshold. - */ - if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD) - return ICE_ERR_INVAL_SIZE; + mutex_lock(&vfs->table_lock); - /* The watermark value should not be lesser than the threshold limit - * set for the number of asynchronous messages a VF can send to mailbox - * nor should it be greater than the maximum number of messages in the - * mailbox serviced in current interrupt. - */ - if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD || - mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx) - return ICE_ERR_PARAM; + ice_eswitch_release(pf); - new_state = ICE_MAL_VF_DETECT_STATE_INVALID; - snap_buf = &snap->mbx_buf; + if (ice_dcf_get_state(pf) != ICE_DCF_STATE_OFF) { + ice_rm_all_dcf_sw_rules(pf); + ice_dcf_set_state(pf, ICE_DCF_STATE_OFF); + pf->dcf.vf = NULL; + } - switch (snap_buf->state) { - case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT: - /* Clear any previously held data in mailbox snapshot structure. */ - ice_mbx_reset_snapshot(snap); + ice_for_each_vf(pf, bkt, vf) { + u32 reg_idx, bit_idx; - /* Collect the pending ARQ count, number of messages processed and - * the maximum number of messages allowed to be processed from the - * Mailbox for current interrupt. - */ - snap_buf->num_pending_arq = mbx_data->num_pending_arq; - snap_buf->num_msg_proc = mbx_data->num_msg_proc; - snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx; + mutex_lock(&vf->cfg_lock); - /* Capture a new static snapshot of the mailbox by logging the - * head and tail of snapshot and set num_iterations to the tail - * value to mark the start of the iteration through the snapshot. - */ - snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean + - mbx_data->num_pending_arq); - snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1); - snap_buf->num_iterations = snap_buf->tail; + ice_dis_vf_qs(vf); - /* Pending ARQ messages returned by ice_clean_rq_elem - * is the difference between the head and tail of the - * mailbox queue. Comparing this value against the watermark - * helps to check if we potentially have malicious VFs. 
- */ - if (snap_buf->num_pending_arq >= - mbx_data->async_watermark_val) { - new_state = ICE_MAL_VF_DETECT_STATE_DETECT; - status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf); - } else { - new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE; - ice_mbx_traverse(hw, &new_state); + if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + u16 abs_vf_id = ice_abs_vf_id(hw, vf->vf_id); + struct iidc_core_dev_info *rcdi; + + /* disable VF qp mappings and set VF disable state */ + ice_dis_vf_mappings(vf); + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + rcdi = ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID); + ice_send_vf_reset_to_aux(rcdi, abs_vf_id); + ice_free_vf_res(vf); } - break; - case ICE_MAL_VF_DETECT_STATE_TRAVERSE: - new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE; - ice_mbx_traverse(hw, &new_state); - break; + /* If we disabled VFs, we need to acknowledge VFLR. Without + * this, the VF won't work properly when SR-IOV gets + * re-enabled. Don't acknowledge the VFLR if VFs are still + * assigned into guests. + */ + if (!pci_vfs_assigned(pf->pdev)) { + reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; + wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); + } - case ICE_MAL_VF_DETECT_STATE_DETECT: - new_state = ICE_MAL_VF_DETECT_STATE_DETECT; - status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf); - break; + /* clear malicious info since the VF is getting released */ + if (ice_mbx_clear_malvf(&hw->mbx_snapshot, vfs->malvfs, + ICE_MAX_SRIOV_VFS, vf->vf_id)) + dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", + vf->vf_id); + mutex_unlock(&vf->cfg_lock); + } + + if (ice_sriov_free_msix_res(pf)) + dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); + + vfs->num_qps_per = 0; + ice_free_vf_entries(pf); + + mutex_unlock(&vfs->table_lock); + + clear_bit(ICE_VF_DIS, pf->state); + clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); +} + +/** + * ice_vf_vsi_setup - Set up a VF VSI + * @vf: VF to setup VSI for + * + * Returns pointer to the successfully allocated VSI struct on success, + * otherwise returns NULL on failure. + */ +static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) +{ + struct ice_port_info *pi = ice_vf_get_port_info(vf); + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL, 0); + + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); + ice_vf_invalidate_vsi(vf); + return NULL; + } + + vf->lan_vsi_idx = vsi->idx; + vf->lan_vsi_num = vsi->vsi_num; + + return vsi; +} + +/** + * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space + * @pf: pointer to PF structure + * @vf: pointer to VF that the first MSIX vector index is being calculated for + * + * This returns the first MSIX vector index in PF space that is used by this VF. + * This index is used when accessing PF relative registers such as + * GLINT_VECT2FUNC and GLINT_DYN_CTL. + * This will always be the OICR index in the AVF driver so any functionality + * using vf->first_vector_idx for queue configuration will have to increment by + * 1 to avoid meddling with the OICR index. 
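As a purely hypothetical example of the calculation implemented just below: with pf->sriov_base_vector = 96 and pf->vfs.num_msix_per = 17, VF 2 gets first_vector_idx = 96 + 2 * 17 = 130, and its vectors occupy PF indices 130 through 146 (first_vector_idx + num_msix_per - 1, the same range walked when disabling interrupts in ice_free_vf_res()), with index 130 serving as the VF's OICR/mailbox vector as described above.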
+ */ +static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) +{ + return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per; +} + +/** + * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware + * @vf: VF to enable MSIX mappings for + * + * Some of the registers need to be indexed/configured using hardware global + * device values and other registers need 0-based values, which represent PF + * based values. + */ +static void ice_ena_vf_msix_mappings(struct ice_vf *vf) +{ + int device_based_first_msix, device_based_last_msix; + int pf_based_first_msix, pf_based_last_msix, v; + struct ice_pf *pf = vf->pf; + int device_based_vf_id; + struct ice_hw *hw; + u32 reg; + + hw = &pf->hw; + pf_based_first_msix = vf->first_vector_idx; + pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1; + + device_based_first_msix = pf_based_first_msix + + pf->hw.func_caps.common_cap.msix_vector_first_id; + device_based_last_msix = + (device_based_first_msix + pf->vfs.num_msix_per) - 1; + device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + + reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) & + VPINT_ALLOC_FIRST_M) | + ((device_based_last_msix << VPINT_ALLOC_LAST_S) & + VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M); + wr32(hw, VPINT_ALLOC(vf->vf_id), reg); + + reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S) + & VPINT_ALLOC_PCI_FIRST_M) | + ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) & + VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M); + wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg); + + /* map the interrupts to its functions */ + for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) { + reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) & + GLINT_VECT2FUNC_VF_NUM_M) | + ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & + GLINT_VECT2FUNC_PF_NUM_M)); + wr32(hw, GLINT_VECT2FUNC(v), reg); + } + + /* Map mailbox interrupt to VF MSI-X vector 0 */ + wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M); +} + +/** + * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF + * @vf: VF to enable the mappings for + * @max_txq: max Tx queues allowed on the VF's VSI + * @max_rxq: max Rx queues allowed on the VF's VSI + */ +static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + struct ice_hw *hw = &vf->pf->hw; + u32 reg; + + if (WARN_ON(!vsi)) + return; + + /* set regardless of mapping mode */ + wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M); + + /* VF Tx queues allocation */ + if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) { + /* set the VF PF Tx queue range + * VFNUMQ value should be set to (number of queues - 1). A value + * of 0 means 1 queue and a value of 255 means 256 queues + */ + reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) & + VPLAN_TX_QBASE_VFFIRSTQ_M) | + (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) & + VPLAN_TX_QBASE_VFNUMQ_M)); + wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg); + } else { + dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); + } + + /* set regardless of mapping mode */ + wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M); + + /* VF Rx queues allocation */ + if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) { + /* set the VF PF Rx queue range + * VFNUMQ value should be set to (number of queues - 1). 
A value + * of 0 means 1 queue and a value of 255 means 256 queues + */ + reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) & + VPLAN_RX_QBASE_VFFIRSTQ_M) | + (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) & + VPLAN_RX_QBASE_VFNUMQ_M)); + wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg); + } else { + dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); + } +} + +/** + * ice_ena_vf_mappings - enable VF MSIX and queue mapping + * @vf: pointer to the VF structure + */ +static void ice_ena_vf_mappings(struct ice_vf *vf) +{ + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + u16 max_txq, max_rxq; + + if (WARN_ON(!vsi)) + return; + + ice_ena_vf_msix_mappings(vf); + + if (ice_is_vf_adq_ena(vf)) { + u16 offset, num_qps; + + offset = vf->ch[vf->num_tc - 1].offset; + num_qps = vf->ch[vf->num_tc - 1].num_qps; + max_txq = offset + num_qps; + max_rxq = offset + num_qps; + } else { + max_txq = vsi->alloc_txq; + max_rxq = vsi->alloc_rxq; + } + + ice_ena_vf_q_mappings(vf, max_txq, max_rxq); +} + +/** + * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space + * @vf: VF to calculate the register index for + * @q_vector: a q_vector associated to the VF + * @tc: Traffic class number for VF ADQ + */ +int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector, + u8 __maybe_unused tc) +{ + struct ice_pf *pf; + u32 reg_idx; + + if (!vf || !q_vector) + return -EINVAL; + + pf = vf->pf; + + /* always add one to account for the OICR being the first MSIX */ + reg_idx = pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id + + q_vector->v_idx + 1; + + if (tc && ice_is_vf_adq_ena(vf)) + return reg_idx + vf->ch[tc].offset; + else + return reg_idx; +} + +/** + * ice_get_max_valid_res_idx - Get the max valid resource index + * @res: pointer to the resource to find the max valid index for + * + * Start from the end of the ice_res_tracker and return right when we find the + * first res->list entry with the ICE_RES_VALID_BIT set. This function is only + * valid for SR-IOV because it is the only consumer that manipulates the + * res->end and this is always called when res->end is set to res->num_entries. + */ +static int ice_get_max_valid_res_idx(struct ice_res_tracker *res) +{ + int i; + + if (!res) + return -EINVAL; + + for (i = res->num_entries - 1; i >= 0; i--) + if (res->list[i] & ICE_RES_VALID_BIT) + return i; + + return 0; +} + +/** + * ice_sriov_set_msix_res - Set any used MSIX resources + * @pf: pointer to PF structure + * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs + * + * This function allows SR-IOV resources to be taken from the end of the PF's + * allowed HW MSIX vectors so that the irq_tracker will not be affected. We + * just set the pf->sriov_base_vector and return success. + * + * If there are not enough resources available, return an error. This should + * always be caught by ice_set_per_vf_res(). + * + * Return 0 on success, and -EINVAL when there are not enough MSIX vectors + * in the PF's space available for SR-IOV. 
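A hypothetical example of the check implemented just below: with num_msix_vectors = 512 in the function capabilities and an irq_tracker holding 200 entries, a request for 16 VFs at 17 vectors each needs 272 vectors, so sriov_base_vector = 512 - 272 = 240; since 240 >= 200 the VF block fits at the top of the PF's MSI-X space and the call succeeds. Asking for 20 such VFs would need 340 vectors, giving a base of 172 < 200 and therefore -EINVAL.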
+ */ +static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) +{ + u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; + int vectors_used = pf->irq_tracker->num_entries; + int sriov_base_vector; + + sriov_base_vector = total_vectors - num_msix_needed; + + /* make sure we only grab irq_tracker entries from the list end and + * that we have enough available MSIX vectors + */ + if (sriov_base_vector < vectors_used) + return -EINVAL; + + pf->sriov_base_vector = sriov_base_vector; + + return 0; +} + +/** + * ice_set_per_vf_res - check if vectors and queues are available + * @pf: pointer to the PF structure + * @num_vfs: the number of SR-IOV VFs being configured + * + * First, determine HW interrupts from common pool. If we allocate fewer VFs, we + * get more vectors and can enable more queues per VF. Note that this does not + * grab any vectors from the SW pool already allocated. Also note, that all + * vector counts include one for each VF's miscellaneous interrupt vector + * (i.e. OICR). + * + * Minimum VFs - 2 vectors, 1 queue pair + * Small VFs - 5 vectors, 4 queue pairs + * Medium VFs - 17 vectors, 16 queue pairs + * + * While more vectors can be assigned to a VF, the RSS LUT + * is only 4 bits wide, so we can only do 16 queues of RSS + * per VF. + * + * ADQ sizes: + * Small ADQ VFs - 5 vectors, 4 TCs, 16 queue pairs (4 queue pairs/int) + * Medium ADQ VFs - 17 vectors, 4 TCs, 16 queue pairs (1 queue pairs/int) + * + * Second, determine number of queue pairs per VF by starting with a pre-defined + * maximum each VF supports. If this is not possible, then we adjust based on + * queue pairs available on the device. + * + * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used + * by each VF during VF initialization and reset. + */ +static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) +{ + int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); + u16 num_msix_per_vf, num_txq, num_rxq, avail_qs; + int msix_avail_per_vf, msix_avail_for_sriov; + struct device *dev = ice_pf_to_dev(pf); + int err; + + lockdep_assert_held(&pf->vfs.table_lock); + + if (!num_vfs) + return -EINVAL; + + if (max_valid_res_idx < 0) + return -ENOSPC; + + /* determine MSI-X resources per VF */ + msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - + pf->irq_tracker->num_entries; + msix_avail_per_vf = msix_avail_for_sriov / num_vfs; + if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MAX) { + num_msix_per_vf = ICE_NUM_VF_MSIX_MAX; + } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_LARGE) { + num_msix_per_vf = ICE_NUM_VF_MSIX_LARGE; + } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) { + num_msix_per_vf = ICE_NUM_VF_MSIX_MED; + } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) { + num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL; + } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) { + num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN; + } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) { + num_msix_per_vf = ICE_MIN_INTR_PER_VF; + } else { + dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. 
Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n", + msix_avail_for_sriov, ICE_MIN_INTR_PER_VF, + num_vfs); + return -ENOSPC; + } + + num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF, + ICE_MAX_DFLT_QS_PER_VF); + avail_qs = ice_get_avail_txq_count(pf) / num_vfs; + if (!avail_qs) + num_txq = 0; + else if (num_txq > avail_qs) + num_txq = rounddown_pow_of_two(avail_qs); + + num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF, + ICE_MAX_DFLT_QS_PER_VF); + avail_qs = ice_get_avail_rxq_count(pf) / num_vfs; + if (!avail_qs) + num_rxq = 0; + else if (num_rxq > avail_qs) + num_rxq = rounddown_pow_of_two(avail_qs); + + if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) { + dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n", + ICE_MIN_QS_PER_VF, num_vfs); + return -ENOSPC; + } + + err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs); + if (err) { + dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n", + num_vfs, err); + return err; + } + + /* only allow equal Tx/Rx queue count (i.e. queue pairs) */ + pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq); + pf->vfs.num_msix_per = num_msix_per_vf; + dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n", + num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per); + + return 0; +} + +/** + * ice_init_vf_vsi_res - initialize/setup VF VSI resources + * @vf: VF to initialize/setup the VSI for + * + * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the + * VF VSI's broadcast filter and is only used during initial VF creation. + */ +static int ice_init_vf_vsi_res(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int err; + + vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); + + vsi = ice_vf_vsi_setup(vf); + if (!vsi) + return -ENOMEM; + + err = ice_vf_init_host_cfg(vf, vsi); + if (err) + goto release_vsi; + + return 0; + +release_vsi: + ice_vf_vsi_release(vf); + return err; +} + +/** + * ice_start_vfs - start VFs so they are ready to be used by SR-IOV + * @pf: PF the VFs are associated with + */ +static int ice_start_vfs(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + unsigned int bkt, it_cnt; + struct ice_vf *vf; + int retval; + + lockdep_assert_held(&pf->vfs.table_lock); + + it_cnt = 0; + ice_for_each_vf(pf, bkt, vf) { + vf->vf_ops->clear_reset_trigger(vf); + + retval = ice_init_vf_vsi_res(vf); + if (retval) { + ice_dev_err_errno(ice_pf_to_dev(pf), retval, + "Failed to initialize VSI resources for VF %d", + vf->vf_id); + goto teardown; + } + + set_bit(ICE_VF_STATE_INIT, vf->vf_states); + ice_ena_vf_mappings(vf); + wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); + it_cnt++; + } + + ice_flush(hw); + return 0; + +teardown: + ice_for_each_vf(pf, bkt, vf) { + if (it_cnt == 0) + break; + + ice_dis_vf_mappings(vf); + ice_vf_vsi_release(vf); + it_cnt--; + } + + return retval; +} + +/** + * ice_sriov_free_vf - Free VF memory after all references are dropped + * @vf: pointer to VF to free + * + * Called by ice_put_vf through ice_release_vf once the last reference to a VF + * structure has been dropped. 
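Tying the sizing rules of ice_set_per_vf_res() above together with a hypothetical run (assuming ICE_NUM_VF_MSIX_SMALL = 5, ICE_NONQ_VECS_VF = 1 and ICE_MAX_DFLT_QS_PER_VF = 16, consistent with the tier comment above): with 100 MSI-X vectors left for SR-IOV and 16 VFs requested, msix_avail_per_vf = 6, which selects the 5-vector "small" tier, so each VF gets 5 vectors and at most 5 - 1 = 4 queue pairs. If only 48 Tx queues remain in the pool, avail_qs = 48 / 16 = 3 and num_txq is rounded down to the nearest power of two, 2; the same logic runs independently for the Rx side and the smaller of the two becomes pf->vfs.num_qps_per.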
+ */ +static void ice_sriov_free_vf(struct ice_vf *vf) +{ + mutex_destroy(&vf->cfg_lock); + + kfree_rcu(vf, rcu); +} + +/** + * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers + * @vf: the vf to configure + */ +static void ice_sriov_clear_mbx_register(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + + wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0); + wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0); +} + +/** + * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF + * @vf: pointer to VF structure + * @is_vflr: true if reset occurred due to VFLR + * + * Trigger and cleanup after a VF reset for a SR-IOV VF. + */ +static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr) +{ + struct ice_pf *pf = vf->pf; + u32 reg, reg_idx, bit_idx; + unsigned int vf_abs_id, i; + struct device *dev; + struct ice_hw *hw; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; + + /* In the case of a VFLR, HW has already reset the VF and we just need + * to clean up. Otherwise we must first trigger the reset using the + * VFRTRIG register. + */ + if (!is_vflr) { + reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); + reg |= VPGEN_VFRTRIG_VFSWR_M; + wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); + } + + /* clear the VFLR bit in GLGEN_VFLRSTAT */ + reg_idx = (vf_abs_id) / 32; + bit_idx = (vf_abs_id) % 32; + wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); + ice_flush(hw); + + wr32(hw, PF_PCI_CIAA, + VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S)); + for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) { + reg = rd32(hw, PF_PCI_CIAD); + /* no transactions pending so stop polling */ + if ((reg & VF_TRANS_PENDING_M) == 0) + break; + + dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id); + udelay(ICE_PCI_CIAD_WAIT_DELAY_US); + } +} + +/** + * ice_sriov_poll_reset_status - poll SRIOV VF reset status + * @vf: pointer to VF structure + * + * Returns true when reset is successful, else returns false + */ +static bool ice_sriov_poll_reset_status(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + unsigned int i; + u32 reg; + + for (i = 0; i < 10; i++) { + /* VF reset requires driver to first reset the VF and then + * poll the status register to make sure that the reset + * completed successfully. + */ + reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id)); + if (reg & VPGEN_VFRSTAT_VFRD_M) + return true; + + /* only sleep if the reset is not done */ + usleep_range(10, 20); + } + return false; +} + +/** + * ice_sriov_clear_reset_trigger - enable VF to access hardware + * @vf: VF to enabled hardware access for + */ +static void ice_sriov_clear_reset_trigger(struct ice_vf *vf) +{ + struct ice_hw *hw = &vf->pf->hw; + u32 reg; + + reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); + reg &= ~VPGEN_VFRTRIG_VFSWR_M; + wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); + ice_flush(hw); +} + +/** + * ice_sriov_create_vsi - Create a new VSI for a VF + * @vf: VF to create the VSI for + * + * This is called by ice_vf_recreate_vsi to create the new VSI after the old + * VSI has been released. 
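The GLGEN_VFLRSTAT indexing used in ice_sriov_trigger_reset_register() above (and in ice_free_vfs()) is a simple divide/modulo: with a hypothetical hw->func_caps.vf_base_id of 64, VF 5 has absolute ID 69, so reg_idx = 69 / 32 = 2 and bit_idx = 69 % 32 = 5, and the VFLR indication is acknowledged by writing BIT(5) to GLGEN_VFLRSTAT(2).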
+ */ +static int ice_sriov_create_vsi(struct ice_vf *vf) +{ + struct ice_vsi *vsi; + + vsi = ice_vf_vsi_setup(vf); + if (!vsi) + return -ENOMEM; + + return 0; +} + +/** + * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt + * @vf: VF to perform tasks on + */ +static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf) +{ + ice_vf_rebuild_adq_host_cfg(vf); + + ice_ena_vf_mappings(vf); + wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); +} + +static struct ice_q_vector *ice_sriov_get_q_vector(struct ice_vf *vf, + struct ice_vsi *vsi, + u16 vector_id) +{ + if (!vsi || !vsi->q_vectors) + return NULL; + + /* Subtract non queue vector from vector_id passed by VF + * to get actual number of VSI queue vector array index + */ + return vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF]; +} + +/** + * ice_sriov_get_glint_ceqctl_idx - get GLINT_CEQCTL index relative to the PF + * @vf: VF used to get the index + * @ceq_idx: 0-based index from the VF + * + * Use the VF relative (0-based) CEQ index plus the first PF MSI-X index + * assigned to this VF (relative to the PF's MSIX space) to determine the + * index of the GLINT_CEQCTL register + */ +static u16 ice_sriov_get_glint_ceqctl_idx(struct ice_vf *vf, u16 ceq_idx) +{ + return vf->first_vector_idx + ceq_idx; +} + +/** + * ice_sriov_clear_ceq_irq_map - clear the CEQ IRQ mapping + * @vf: VF used to clear the mapping + * @ceq_idx: VF relative (0-based) CEQ index + */ +static void ice_sriov_clear_ceq_irq_map(struct ice_vf *vf, u16 ceq_idx) +{ + u16 glint_ceqctl_idx = ice_sriov_get_glint_ceqctl_idx(vf, ceq_idx); + + wr32(&vf->pf->hw, GLINT_CEQCTL(glint_ceqctl_idx), 0); +} + +/** + * ice_sriov_clear_aeq_irq_map - clear the AEQ IRQ mapping + * @vf: VF used to clear the mapping + */ +static void ice_sriov_clear_aeq_irq_map(struct ice_vf *vf) +{ + wr32(&vf->pf->hw, VPINT_AEQCTL(vf->vf_id), 0); +} + +/** + * ice_sriov_clear_rdma_irq_map - clear the RDMA IRQ mapping + * @vf: VF used to clear the mapping + * + * Clear any RDMA IRQ mapping that a VF might have requested. Since the number + * of CEQ indices are never greater than the num_msix_per_vf just clear all CEQ + * indices that are possibly associated to this VF. Also clear the AEQ for this + * VF. Doing it this way prevents the need to cache the configuration received + * on VIRTCHNL_OP_CONFIG_RMDA_IRQ_MAP since VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP is + * designed to clear the entire RDMA IRQ mapping configuration. + */ +static void ice_sriov_clear_rdma_irq_map(struct ice_vf *vf) +{ + u16 i; + + for (i = 0; i < vf->pf->vfs.num_msix_per; i++) + ice_sriov_clear_ceq_irq_map(vf, i); + + ice_sriov_clear_aeq_irq_map(vf); +} + +/** + * ice_sriov_cfg_rdma_ceq_irq_map - configure the CEQ IRQ mapping + * @vf: VF structure associated to the VF that requested the mapping + * @qv_info: RDMA queue vector mapping information + * + * Configure the CEQ index for the passed in VF. 
This will result in the CEQ + * being able to generate interrupts + */ +static void +ice_sriov_cfg_rdma_ceq_irq_map(struct ice_vf *vf, + struct virtchnl_rdma_qv_info *qv_info) +{ + u16 glint_ceqctl_idx = ice_sriov_get_glint_ceqctl_idx(vf, + qv_info->ceq_idx); + + u32 regval = (qv_info->v_idx & GLINT_CEQCTL_MSIX_INDX_M) | + ((qv_info->itr_idx << GLINT_CEQCTL_ITR_INDX_S) & + GLINT_CEQCTL_ITR_INDX_M) | GLINT_CEQCTL_CAUSE_ENA_M; + + wr32(&vf->pf->hw, GLINT_CEQCTL(glint_ceqctl_idx), regval); +} + +/** + * ice_sriov_cfg_rdma_aeq_irq_map - configure the AEQ IRQ mapping + * @vf: VF structure associated to the VF that requested the mapping + * @qv_info: RDMA queue vector mapping information + * + * Configure the AEQ for the passed in VF. This will result in the AEQ being + * able to generate interrupts + */ +static void +ice_sriov_cfg_rdma_aeq_irq_map(struct ice_vf *vf, + struct virtchnl_rdma_qv_info *qv_info) +{ + u32 regval = (qv_info->v_idx & PFINT_AEQCTL_MSIX_INDX_M) | + ((qv_info->itr_idx << VPINT_AEQCTL_ITR_INDX_S) & + VPINT_AEQCTL_ITR_INDX_M) | VPINT_AEQCTL_CAUSE_ENA_M; + + wr32(&vf->pf->hw, VPINT_AEQCTL(vf->vf_id), regval); +} + +/** + * ice_sriov_cfg_rdma_irq_map - Configure RDMA IRQ mappings + * @vf: VF structure associated to the VF that requested the mapping + * @qv_info: RDMA queue vector mapping information + * + * Configure the AEQ and CEQ IRQ mappings for an SRIOV VF. + */ +static void +ice_sriov_cfg_rdma_irq_map(struct ice_vf *vf, + struct virtchnl_rdma_qv_info *qv_info) +{ + if (qv_info->ceq_idx != VIRTCHNL_RDMA_INVALID_QUEUE_IDX) + ice_sriov_cfg_rdma_ceq_irq_map(vf, qv_info); + + if (qv_info->aeq_idx != VIRTCHNL_RDMA_INVALID_QUEUE_IDX) + ice_sriov_cfg_rdma_aeq_irq_map(vf, qv_info); +} + +static const struct ice_vf_ops ice_sriov_vf_ops = { + .reset_type = ICE_VF_RESET, + .free = ice_sriov_free_vf, + .clear_mbx_register = ice_sriov_clear_mbx_register, + .trigger_reset_register = ice_sriov_trigger_reset_register, + .poll_reset_status = ice_sriov_poll_reset_status, + .clear_reset_trigger = ice_sriov_clear_reset_trigger, + .irq_close = NULL, + .create_vsi = ice_sriov_create_vsi, + .post_vsi_rebuild = ice_sriov_post_vsi_rebuild, + .get_q_vector = ice_sriov_get_q_vector, + .cfg_rdma_irq_map = ice_sriov_cfg_rdma_irq_map, + .clear_rdma_irq_map = ice_sriov_clear_rdma_irq_map, +}; + +/** + * ice_create_vf_entries - Allocate and insert VF entries + * @pf: pointer to the PF structure + * @num_vfs: the number of VFs to allocate + * + * Allocate new VF entries and insert them into the hash table. Set some + * basic default fields for initializing the new VFs. + * + * After this function exits, the hash table will have num_vfs entries + * inserted. + * + * Returns 0 on success or an integer error code on failure. 
+ */ +static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_vfs *vfs = &pf->vfs; + struct ice_vf *vf; + u16 vf_id; + int err; + + lockdep_assert_held(&vfs->table_lock); + + for (vf_id = 0; vf_id < num_vfs; vf_id++) { + vf = kzalloc(sizeof(*vf), GFP_KERNEL); + if (!vf) { + err = -ENOMEM; + goto err_free_entries; + } + kref_init(&vf->refcnt); + + vf->pf = pf; + vf->vf_id = vf_id; + + /* set sriov vf ops for VFs created during SRIOV flow */ + vf->vf_ops = &ice_sriov_vf_ops; + + err = ice_initialize_vf_entry(vf); + if (err) { + dev_err(dev, "Failed to initialize the VF entry for SRIOV VF\n"); + goto err_free_entries; + } + + vf->vf_sw_id = pf->first_sw; + + hash_add_rcu(vfs->table, &vf->entry, vf_id); + } + return 0; + +err_free_entries: + ice_free_vf_entries(pf); + return err; +} + +/** + * ice_ena_vfs - enable VFs so they are ready to be used + * @pf: pointer to the PF structure + * @num_vfs: number of VFs to enable + */ +static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + int ret; + + /* Disable global interrupt 0 so we don't try to handle the VFLR. */ + wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), + ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); + set_bit(ICE_OICR_INTR_DIS, pf->state); + ice_flush(hw); + + ret = pci_enable_sriov(pf->pdev, num_vfs); + if (ret) + goto err_unroll_intr; + + mutex_lock(&pf->vfs.table_lock); + + ice_dcf_init_sw_rule_mgmt(pf); + + ret = ice_set_per_vf_res(pf, num_vfs); + if (ret) { + dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer number of VFs\n", + num_vfs, ret); + goto err_unroll_sriov; + } + + ret = ice_create_vf_entries(pf, num_vfs); + if (ret) { + dev_err(dev, "Failed to allocate VF entries for %d VFs\n", + num_vfs); + goto err_unroll_sriov; + } + + ret = ice_start_vfs(pf); + if (ret) { + dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret); + ret = -EAGAIN; + goto err_unroll_vf_entries; + } + + clear_bit(ICE_VF_DIS, pf->state); + + ret = ice_eswitch_configure(pf); + if (ret) { + dev_err(dev, "Failed to configure eswitch, err %d\n", ret); + goto err_unroll_vf_entries; + } + + /* rearm global interrupts */ + if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) + ice_irq_dynamic_ena(hw, NULL, NULL); + + mutex_unlock(&pf->vfs.table_lock); + + return 0; + +err_unroll_vf_entries: + ice_free_vf_entries(pf); +err_unroll_sriov: + mutex_unlock(&pf->vfs.table_lock); + pci_disable_sriov(pf->pdev); +err_unroll_intr: + /* rearm interrupts here */ + ice_irq_dynamic_ena(hw, NULL, NULL); + clear_bit(ICE_OICR_INTR_DIS, pf->state); + return ret; +} + +/** + * ice_pci_sriov_ena - Enable or change number of VFs + * @pf: pointer to the PF structure + * @num_vfs: number of VFs to allocate + * + * Returns 0 on success and negative on failure + */ +static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) +{ + int pre_existing_vfs = pci_num_vf(pf->pdev); + struct device *dev = ice_pf_to_dev(pf); + int err; + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + ice_free_vfs(pf); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + return 0; + + if (num_vfs > pf->vfs.num_supported) { + dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", + num_vfs, pf->vfs.num_supported); + return -EOPNOTSUPP; + } + + dev_info(dev, "Enabling %d VFs\n", num_vfs); + err = ice_ena_vfs(pf, num_vfs); + if (err) { + ice_dev_err_errno(dev, err, "Failed to enable SR-IOV"); + return err; + } + + 
set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); + return 0; +} + +/** + * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks + * @pf: PF to enabled SR-IOV on + */ +static int ice_check_sriov_allowed(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + + if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { + dev_err(dev, "This device is not capable of SR-IOV\n"); + return -EOPNOTSUPP; + } + + if (test_bit(ICE_RECOVERY_MODE, pf->state)) { + dev_err(dev, "SR-IOV cannot be configured - Device is in Recovery Mode\n"); + return -EOPNOTSUPP; + } + + if (ice_is_safe_mode(pf)) { + dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n"); + return -EOPNOTSUPP; + } + + if (!ice_pf_state_is_nominal(pf)) { + dev_err(dev, "Cannot enable SR-IOV, device not ready\n"); + return -EBUSY; + } + + return 0; +} + +/** + * ice_sriov_configure - Enable or change number of VFs via sysfs + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs to allocate or 0 to free VFs + * + * This function is called when the user updates the number of VFs in sysfs. On + * success return whatever num_vfs was set to by the caller. Return negative on + * failure. + */ +int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + struct device *dev = ice_pf_to_dev(pf); + int err; + + err = ice_check_sriov_allowed(pf); + if (err) + return err; + + if (!num_vfs) { + if (!pci_vfs_assigned(pdev)) { + ice_free_vfs(pf); + ice_mbx_deinit_snapshot(&pf->hw); +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag) + ice_enable_lag(pf->lag); +#endif /* HAVE_NETDEV_UPPER_INFO */ + return 0; + } + + dev_err(dev, "can't free VFs because some are assigned to VMs.\n"); + return -EBUSY; + } + + err = ice_mbx_init_snapshot(&pf->hw, num_vfs); + if (err) + return err; + + err = ice_pci_sriov_ena(pf, num_vfs); + if (err) { + ice_mbx_deinit_snapshot(&pf->hw); + return err; + } + +#ifdef HAVE_NETDEV_UPPER_INFO + if (pf->lag) + ice_disable_lag(pf->lag); +#endif /* HAVE_NETDEV_UPPER_INFO */ + return num_vfs; +} + +/** + * ice_process_vflr_event - Free VF resources via IRQ calls + * @pf: pointer to the PF structure + * + * called from the VFLR IRQ handler to + * free up VF resources and state variables + */ +void ice_process_vflr_event(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + struct ice_vf *vf; + unsigned int bkt; + u32 reg; + + if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) || + !ice_has_vfs(pf)) + return; + + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) { + u32 reg_idx, bit_idx; + + reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; + /* read GLGEN_VFLRSTAT register to find out the flr VFs */ + reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); + if (reg & BIT(bit_idx)) + /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ + ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK); + } + mutex_unlock(&pf->vfs.table_lock); +} + +/** + * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in + * @pf: PF used to index all VFs + * @pfq: queue index relative to the PF's function space + * + * If no VF is found who owns the pfq then return NULL, otherwise return a + * pointer to the VF who owns the pfq + * + * If this function returns non-NULL, it acquires a reference count of the VF + * structure. The caller is responsible for calling ice_put_vf() to drop this + * reference. 
+ */ +static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) +{ + struct ice_vf *vf; + unsigned int bkt; + + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { + struct ice_vsi *vsi; + u16 rxq_idx; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + continue; + + ice_for_each_rxq(vsi, rxq_idx) + if (vsi->rxq_map[rxq_idx] == pfq) { + struct ice_vf *found; + + if (kref_get_unless_zero(&vf->refcnt)) + found = vf; + else + found = NULL; + rcu_read_unlock(); + return found; + } + } + rcu_read_unlock(); + + return NULL; +} + +/** + * ice_globalq_to_pfq - convert from global queue index to PF space queue index + * @pf: PF used for conversion + * @globalq: global queue index used to convert to PF space queue index + */ +static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq) +{ + return globalq - pf->hw.func_caps.common_cap.rxq_first_id; +} + +/** + * ice_vf_lan_overflow_event - handle LAN overflow event for a VF + * @pf: PF that the LAN overflow event happened on + * @event: structure holding the event information for the LAN overflow event + * + * Determine if the LAN overflow event was caused by a VF queue. If it was not + * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a + * reset on the offending VF. + */ +void +ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) +{ + u32 gldcb_rtctq, queue; + struct ice_vf *vf; + + gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq); + dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); + + /* event returns device global Rx queue number */ + queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >> + GLDCB_RTCTQ_RXQNUM_S; + + vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); + if (!vf) + return; + + ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK); + ice_put_vf(vf); +} + +/** + * ice_set_vf_spoofchk + * @netdev: network interface device structure + * @vf_id: VF identifier + * @ena: flag to enable or disable feature + * + * Enable or disable VF spoof checking + */ +int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + struct ice_vsi *vf_vsi; + struct device *dev; + struct ice_vf *vf; + int ret; + + dev = ice_pf_to_dev(pf); + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + goto out_put_vf; + + vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + netdev_err(netdev, "VSI %d for VF %d is null\n", + vf->lan_vsi_idx, vf->vf_id); + ret = -EINVAL; + goto out_put_vf; + } + + if (vf_vsi->type != ICE_VSI_VF) { + netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n", + vf_vsi->type, vf_vsi->vsi_num, vf->vf_id); + ret = -ENODEV; + goto out_put_vf; + } + + if (ena == vf->spoofchk) { + dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF"); + ret = 0; + goto out_put_vf; + } + + ret = ice_vsi_apply_spoofchk(vf_vsi, ena); + if (ret) + ice_dev_err_errno(dev, ret, + "Failed to set spoofchk %s for VF %d VSI %d", + ena ? 
"ON" : "OFF", vf->vf_id, + vf_vsi->vsi_num); + else + vf->spoofchk = ena; + +out_put_vf: + ice_put_vf(vf); + return ret; +} + +/** + * ice_get_vf_cfg + * @netdev: network interface device structure + * @vf_id: VF identifier + * @ivi: VF configuration structure + * + * return VF configuration + */ +int +ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_vf *vf; + int ret; + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + goto out_put_vf; + + ivi->vf = vf_id; + ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr); + + /* VF configuration for VLAN and applicable QoS */ + ivi->vlan = ice_vf_get_port_vlan_id(vf); + ivi->qos = ice_vf_get_port_vlan_prio(vf); +#ifdef IFLA_VF_VLAN_INFO_MAX + if (ice_vf_is_port_vlan_ena(vf)) + ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf)); +#endif /* IFLA_VF_VLAN_INFO_MAX */ + +#ifdef HAVE_NDO_SET_VF_TRUST + ivi->trusted = vf->trusted; +#endif /* HAVE_NDO_SET_VF_TRUST */ + ivi->spoofchk = vf->spoofchk; +#ifdef HAVE_NDO_SET_VF_LINK_STATE + if (!vf->link_forced) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf->link_up) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ivi->max_tx_rate = vf->max_tx_rate; + ivi->min_tx_rate = vf->min_tx_rate; +#else + ivi->tx_rate = vf->max_tx_rate; +#endif + +out_put_vf: + ice_put_vf(vf); + return ret; +} + +/** + * ice_set_vf_mac + * @netdev: network interface device structure + * @vf_id: VF identifier + * @mac: MAC address + * + * program VF MAC address + */ +int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_vf *vf; + int ret; + + if (is_multicast_ether_addr(mac)) { + netdev_err(netdev, "%pM not a valid unicast address\n", mac); + return -EINVAL; + } + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + + /* nothing left to do, unicast MAC already set */ + if (ether_addr_equal(vf->dev_lan_addr.addr, mac) && + ether_addr_equal(vf->hw_lan_addr.addr, mac)) { + ret = 0; + goto out_put_vf; + } + + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + goto out_put_vf; + + if (ice_vf_chnl_dmac_fltr_cnt(vf)) { + netdev_err(netdev, + "can't set mac %pM. VF %d has tc-flower filters, delete them and try again\n", + mac, vf_id); + ret = -EAGAIN; + goto out_put_vf; + } + + mutex_lock(&vf->cfg_lock); + + /* VF is notified of its new MAC via the PF's response to the + * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset + */ + ether_addr_copy(vf->dev_lan_addr.addr, mac); + ether_addr_copy(vf->hw_lan_addr.addr, mac); + if (is_zero_ether_addr(mac)) { + /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */ + vf->pf_set_mac = false; + netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n", + vf->vf_id); + } else { + /* PF will add MAC rule for the VF */ + vf->pf_set_mac = true; + netdev_info(netdev, "Setting MAC %pM on VF %d. 
VF driver will be reinitialized\n", + mac, vf_id); + } + + ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); + mutex_unlock(&vf->cfg_lock); + +out_put_vf: + ice_put_vf(vf); + return ret; +} + +#ifdef HAVE_NDO_SET_VF_TRUST +/** + * ice_set_vf_trust + * @netdev: network interface device structure + * @vf_id: VF identifier + * @trusted: Boolean value to enable/disable trusted VF + * + * Enable or disable a given VF as trusted + */ +int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_vf *vf; + int ret; + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + + if (ice_is_eswitch_mode_switchdev(pf)) { + dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n"); + return -EOPNOTSUPP; + } + + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + goto out_put_vf; + + /* Check if already trusted */ + if (trusted == vf->trusted) { + ret = 0; + goto out_put_vf; + } + + mutex_lock(&vf->cfg_lock); + + /* If the trust mode of a given DCF is taken away without the DCF + * gracefully relinquishing the DCF functionality, remove ALL switch + * filters that were added by the DCF and treat this VF as any other + * untrusted AVF. + */ + if (ice_is_vf_dcf(vf) && !trusted && + ice_dcf_get_state(pf) != ICE_DCF_STATE_OFF) { + ice_rm_all_dcf_sw_rules(pf); + ice_clear_dcf_acl_cfg(pf); + ice_clear_dcf_udp_tunnel_cfg(pf); + pf->hw.dcf_caps &= ~(DCF_ACL_CAP | DCF_UDP_TUNNEL_CAP); + ice_dcf_set_state(pf, ICE_DCF_STATE_OFF); + pf->dcf.vf = NULL; + vf->driver_caps &= ~VIRTCHNL_VF_CAP_DCF; + } + + vf->trusted = trusted; + ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); + dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", + vf_id, trusted ? "" : "un"); + + mutex_unlock(&vf->cfg_lock); + +out_put_vf: + ice_put_vf(vf); + return ret; +} + +#endif /* HAVE_NDO_SET_VF_TRUST */ +#ifdef HAVE_NDO_SET_VF_LINK_STATE +/** + * ice_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: VF identifier + * @link_state: required link state + * + * Set VF's link state, irrespective of physical link state status + */ +int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_vf *vf; + int ret; + + /* disallow link state change if eeprom is corrupted */ + if (test_bit(ICE_BAD_EEPROM, pf->state)) + return -EOPNOTSUPP; + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + goto out_put_vf; + + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + vf->link_forced = false; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->link_forced = true; + vf->link_up = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->link_forced = true; + vf->link_up = false; + break; default: - new_state = ICE_MAL_VF_DETECT_STATE_INVALID; - status = ICE_ERR_CFG; + ret = -EINVAL; + goto out_put_vf; } - snap_buf->state = new_state; + if (vf->repr) { + struct net_device *pr_netdev = vf->repr->netdev; + unsigned int flags = pr_netdev->flags; - return status; -} - -/** - * ice_mbx_report_malvf - Track and note malicious VF - * @hw: pointer to the HW struct - * @all_malvfs: all malicious VFs tracked by PF - * @bitmap_len: length of bitmap in bits - * @vf_id: relative virtual function ID of the malicious VF - * @report_malvf: boolean to indicate if malicious VF must be reported - * - * This function will update a bitmap that keeps track of the malicious - * VFs attached to the PF. 
A malicious VF must be reported only once if - * discovered between VF resets or loading so the function checks - * the input vf_id against the bitmap to verify if the VF has been - * detected in any previous mailbox iterations. - */ -enum ice_status -ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, - u16 bitmap_len, u16 vf_id, bool *report_malvf) -{ - if (!all_malvfs || !report_malvf) - return ICE_ERR_PARAM; - - *report_malvf = false; - - if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len) - return ICE_ERR_INVAL_SIZE; - - if (vf_id >= bitmap_len) - return ICE_ERR_OUT_OF_RANGE; - - /* If the vf_id is found in the bitmap set bit and boolean to true */ - if (!test_bit(vf_id, all_malvfs)) { - set_bit(vf_id, all_malvfs); - *report_malvf = true; + flags = vf->link_up ? flags | IFF_UP : flags & ~IFF_UP; + dev_change_flags(pr_netdev, flags, NULL); } - return 0; + ice_vc_notify_vf_link_state(vf); + +out_put_vf: + ice_put_vf(vf); + return ret; +} +#endif /* HAVE_NDO_SET_VF_LINK_STATE */ + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +/** + * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs + * @pf: PF associated with VFs + */ +static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf) +{ + struct ice_vf *vf; + unsigned int bkt; + int rate = 0; + + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) + rate += vf->min_tx_rate; + rcu_read_unlock(); + + return rate; } /** - * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID - * @snap: pointer to the mailbox snapshot structure - * @all_malvfs: all malicious VFs tracked by PF - * @bitmap_len: length of bitmap in bits - * @vf_id: relative virtual function ID of the malicious VF + * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription + * @vf: VF trying to configure min_tx_rate + * @min_tx_rate: min Tx rate in Mbps * - * In case of a VF reset, this function can be called to clear - * the bit corresponding to the VF ID in the bitmap tracking all - * malicious VFs attached to the PF. The function also clears the - * VF counter array at the index of the VF ID. This is to ensure - * that the new VF loaded is not considered malicious before going - * through the overflow detection algorithm. + * Check if the min_tx_rate being passed in will cause oversubscription of total + * min_tx_rate based on the current link speed and all other VFs configured + * min_tx_rate + * + * Return true if the passed min_tx_rate would cause oversubscription, else + * return false */ -enum ice_status -ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, - u16 bitmap_len, u16 vf_id) +static bool +ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate) { - if (!snap || !all_malvfs) - return ICE_ERR_PARAM; + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + int all_vfs_min_tx_rate; + int link_speed_mbps; - if (bitmap_len < snap->mbx_vf.vfcntr_len) - return ICE_ERR_INVAL_SIZE; + if (WARN_ON(!vsi)) + return false; - /* Ensure VF ID value is not larger than bitmap or VF counter length */ - if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len) - return ICE_ERR_OUT_OF_RANGE; + link_speed_mbps = ice_get_link_speed_mbps(vsi); + all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); - /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */ - clear_bit(vf_id, all_malvfs); + /* this VF's previous rate is being overwritten */ + all_vfs_min_tx_rate -= vf->min_tx_rate; - /* Clear the VF counter in the mailbox snapshot structure for that VF ID. 
- * This is to ensure that if a VF is unloaded and a new one brought back - * up with the same VF ID for a snapshot currently in traversal or detect - * state the counter for that VF ID does not increment on top of existing - * values in the mailbox overflow detection algorithm. + if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) { + dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n", + min_tx_rate, vf->vf_id, + all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps, + link_speed_mbps); + return true; + } + + return false; +} +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + +/** + * ice_set_vf_bw - set min/max VF bandwidth + * @netdev: network interface device structure + * @vf_id: VF identifier + * @min_tx_rate: Minimum Tx rate in Mbps + * @max_tx_rate: Maximum Tx rate in Mbps + */ +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +int +ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate) +#else +int ice_set_vf_bw(struct net_device *netdev, int vf_id, int max_tx_rate) +#endif +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_vsi *vsi; + struct device *dev; + struct ice_vf *vf; + int ret; + + dev = ice_pf_to_dev(pf); + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + goto out_put_vf; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + ret = -EINVAL; + goto out_put_vf; + } + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + /* when max_tx_rate is zero that means no max Tx rate limiting, so only + * check if max_tx_rate is non-zero */ - snap->mbx_vf.vf_cntr[vf_id] = 0; + if (max_tx_rate && min_tx_rate > max_tx_rate) { + dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n", + min_tx_rate, max_tx_rate); + ret = -EINVAL; + goto out_put_vf; + } - return 0; +#ifdef NETIF_F_HW_TC + if (min_tx_rate && ice_is_adq_active(pf)) { + dev_err(dev, "ADQ on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n"); + ret = -EOPNOTSUPP; + goto out_put_vf; + } +#endif /* NETIF_F_HW_TC */ + + if (min_tx_rate && ice_is_dcb_active(pf)) { + dev_err(dev, "DCB on PF is currently enabled. 
VF min Tx rate limiting not allowed on this PF.\n"); + ret = -EOPNOTSUPP; + goto out_put_vf; + } + + if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) { + ret = -EINVAL; + goto out_put_vf; + } + + if (vf->min_tx_rate != (unsigned int)min_tx_rate) { + ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000); + if (ret) { + dev_err(dev, "Unable to set min-tx-rate for VF %d\n", + vf->vf_id); + goto out_put_vf; + } + + vf->min_tx_rate = min_tx_rate; + } + +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + if (vf->max_tx_rate != (unsigned int)max_tx_rate) { +#ifdef HAVE_TC_SETUP_CLSFLOWER + u64 adq_max_tx_rate; +#endif + ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000); + if (ret) { + dev_err(dev, "Unable to set max-tx-rate for VF %d\n", + vf->vf_id); + goto out_put_vf; + } + + vf->max_tx_rate = max_tx_rate; +#ifdef HAVE_TC_SETUP_CLSFLOWER + adq_max_tx_rate = ice_vf_adq_total_max_tx_rate(vf); + if (vf->max_tx_rate < adq_max_tx_rate) + dev_warn(dev, "Host managed max_tx_rate %u Mpbs for VF %d is less VF ADQ cummulative max_tx_rate %llu Mpbs\n", + vf->vf_id, vf->max_tx_rate, adq_max_tx_rate); +#endif + } + +out_put_vf: + ice_put_vf(vf); + return ret; +} + +#ifdef HAVE_VF_STATS +/** + * ice_get_vf_stats - populate some stats for the VF + * @netdev: the netdev of the PF + * @vf_id: the host OS identifier (0-255) + * @vf_stats: pointer to the OS memory to be initialized + */ +int ice_get_vf_stats(struct net_device *netdev, int vf_id, + struct ifla_vf_stats *vf_stats) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_eth_stats *stats; + struct ice_vsi *vsi; + struct ice_vf *vf; + int ret; + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + goto out_put_vf; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + ret = -EINVAL; + goto out_put_vf; + } + + ice_update_eth_stats(vsi); + stats = &vsi->eth_stats; + + memset(vf_stats, 0, sizeof(*vf_stats)); + + vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + + stats->rx_multicast; + vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + + stats->tx_multicast; + vf_stats->rx_bytes = stats->rx_bytes; + vf_stats->tx_bytes = stats->tx_bytes; + vf_stats->broadcast = stats->rx_broadcast; + vf_stats->multicast = stats->rx_multicast; +#ifdef HAVE_VF_STATS_DROPPED + vf_stats->rx_dropped = stats->rx_discards; + vf_stats->tx_dropped = stats->tx_discards; +#endif + +out_put_vf: + ice_put_vf(vf); + return ret; +} +#endif /* HAVE_VF_STATS */ + +/** + * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported + * @hw: hardware structure used to check the VLAN mode + * @vlan_proto: VLAN TPID being checked + * + * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q + * and ETH_P_8021AD are supported. If the device is configured in Single VLAN + * Mode (SVM), then only ETH_P_8021Q is supported. 
+ */ +static bool +ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto) +{ + bool is_supported = false; + + switch (vlan_proto) { + case ETH_P_8021Q: + is_supported = true; + break; + case ETH_P_8021AD: + if (ice_is_dvm_ena(hw)) + is_supported = true; + break; + } + + return is_supported; +} + +#ifdef IFLA_VF_VLAN_INFO_MAX +/** + * ice_set_vf_port_vlan + * @netdev: network interface device structure + * @vf_id: VF identifier + * @vlan_id: VLAN ID being set + * @qos: priority setting + * @vlan_proto: VLAN protocol + * + * program VF Port VLAN ID and/or QoS + */ +int +ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, + __be16 vlan_proto) +#else +int +ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos) +#endif /* IFLA_VF_VLAN_INFO_MAX */ +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); +#ifdef IFLA_VF_VLAN_INFO_MAX + u16 local_vlan_proto = ntohs(vlan_proto); +#else + u16 local_vlan_proto = ETH_P_8021Q; +#endif + struct device *dev; + struct ice_vf *vf; + int ret; + + dev = ice_pf_to_dev(pf); + + if (vlan_id >= VLAN_N_VID || qos > 7) { + dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n", + vf_id, vlan_id, qos); + return -EINVAL; + } + + if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) { + dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n", + local_vlan_proto); + return -EPROTONOSUPPORT; + } + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + goto out_put_vf; + + if (ice_vf_get_port_vlan_prio(vf) == qos && + ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto && + ice_vf_get_port_vlan_id(vf) == vlan_id) { + /* duplicate request, so just return success */ + dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n", + vlan_id, qos, local_vlan_proto); + ret = 0; + goto out_put_vf; + } + + mutex_lock(&vf->cfg_lock); + + vf->port_vlan_info = + ICE_VLAN(local_vlan_proto, vlan_id, qos, ICE_FWD_TO_VSI); + if (ice_vf_is_port_vlan_ena(vf)) + dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n", + vlan_id, qos, local_vlan_proto, vf_id); + else + dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id); + + ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); + mutex_unlock(&vf->cfg_lock); + +out_put_vf: + ice_put_vf(vf); + return ret; } /** - * ice_mbx_init_snapshot - Initialize mailbox snapshot structure - * @hw: pointer to the hardware structure - * @vf_count: number of VFs allocated on a PF - * - * Clear the mailbox snapshot structure and allocate memory - * for the VF counter array based on the number of VFs allocated - * on that PF. - * - * Assumption: This function will assume ice_get_caps() has already been - * called to ensure that the vf_count can be compared against the number - * of VFs supported as defined in the functional capabilities of the device. + * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event + * @vf: pointer to the VF structure */ -enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count) +void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { - struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; + struct ice_pf *pf = vf->pf; + struct device *dev; - /* Ensure that the number of VFs allocated is non-zero and - * is not greater than the number of supported VFs defined in - * the functional capabilities of the PF. 
- */ - if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs) - return ICE_ERR_INVAL_SIZE; + dev = ice_pf_to_dev(pf); - snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count, - sizeof(*snap->mbx_vf.vf_cntr), - GFP_KERNEL); - if (!snap->mbx_vf.vf_cntr) - return ICE_ERR_NO_MEMORY; - - /* Setting the VF counter length to the number of allocated - * VFs for given PF's functional capabilities. - */ - snap->mbx_vf.vfcntr_len = vf_count; - - /* Clear mbx_buf in the mailbox snaphot structure and setting the - * mailbox snapshot state to a new capture. - */ - memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf)); - snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; - - return 0; + dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", + vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, + vf->dev_lan_addr.addr, + test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) + ? "on" : "off"); } /** - * ice_mbx_deinit_snapshot - Free mailbox snapshot structure - * @hw: pointer to the hardware structure + * ice_print_vfs_mdd_events - print VFs malicious driver detect event + * @pf: pointer to the PF structure * - * Clear the mailbox snapshot structure and free the VF counter array. + * Called from ice_handle_mdd_event to rate limit and print VFs MDD events. */ -void ice_mbx_deinit_snapshot(struct ice_hw *hw) +void ice_print_vfs_mdd_events(struct ice_pf *pf) { - struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + struct ice_vf *vf; + unsigned int bkt; - /* Free VF counter array and reset vf counter length */ - devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr); - snap->mbx_vf.vfcntr_len = 0; + /* check that there are pending MDD events to print */ + if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state)) + return; - /* Clear mbx_buf in the mailbox snaphot structure */ - memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf)); + /* VF MDD event logs are rate limited to one second intervals */ + if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1)) + return; + + pf->vfs.last_printed_mdd_jiffies = jiffies; + + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) { + /* only print Rx MDD event message if there are new events */ + if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { + vf->mdd_rx_events.last_printed = + vf->mdd_rx_events.count; + ice_print_vf_rx_mdd_event(vf); + } + + /* only print Tx MDD event message if there are new events */ + if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { + vf->mdd_tx_events.last_printed = + vf->mdd_tx_events.count; + + dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", + vf->mdd_tx_events.count, hw->pf_id, vf->vf_id, + vf->dev_lan_addr.addr); + } + } + mutex_unlock(&pf->vfs.table_lock); +} + +/** + * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR + * @pdev: pointer to a pci_dev structure + * + * Called when recovering from a PF FLR to restore interrupt capability to + * the VFs. 
+ */ +void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) +{ + u16 vf_id; + int pos; + + if (!pci_num_vf(pdev)) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + struct pci_dev *vfdev; + + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, + &vf_id); + vfdev = pci_get_device(pdev->vendor, vf_id, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == pdev) + pci_restore_msi_state(vfdev); + vfdev = pci_get_device(pdev->vendor, vf_id, + vfdev); + } + } +} + +/** + * ice_is_malicious_vf - helper function to detect a malicious VF + * @pf: ptr to struct ice_pf + * @event: pointer to the AQ event + * @num_msg_proc: the number of messages processed so far + * @num_msg_pending: the number of messages peinding in admin queue + */ +bool +ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, + u16 num_msg_proc, u16 num_msg_pending) +{ + s16 vf_id = le16_to_cpu(event->desc.retval); + struct device *dev = ice_pf_to_dev(pf); + struct ice_mbx_data mbxdata; + bool malvf = false; + struct ice_vf *vf; + int status; + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return false; + + if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) + goto out_put_vf; + + mbxdata.num_msg_proc = num_msg_proc; + mbxdata.num_pending_arq = num_msg_pending; + mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries; +#define ICE_MBX_OVERFLOW_WATERMARK 64 + mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK; + + /* check to see if we have a malicious VF */ + status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf); + if (status) + goto out_put_vf; + + if (malvf) { + bool report_vf = false; + + /* if the VF is malicious and we haven't let the user + * know about it, then let them know now + */ + status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs, + ICE_MAX_SRIOV_VFS, vf_id, + &report_vf); + if (status) + dev_dbg(dev, "Error reporting malicious VF\n"); + + if (report_vf) { + struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); + + if (pf_vsi) + dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. 
Please see the Adapter User Guide for more information\n", + &vf->dev_lan_addr.addr[0], + pf_vsi->netdev->dev_addr); + } + } + +out_put_vf: + ice_put_vf(vf); + return malvf; +} + +static void ice_dump_vf(struct ice_vf *vf) +{ + struct ice_vsi *vsi; + struct device *dev; + struct ice_pf *pf; + + if (!vf) + return; + + pf = vf->pf; + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return; + + dev = ice_pf_to_dev(pf); + dev_info(dev, "VF[%d]:\n", vf->vf_id); + dev_info(dev, "\tvf_ver.major = %d vf_ver.minor = %d\n", + vf->vf_ver.major, vf->vf_ver.minor); + dev_info(dev, "\tdriver_caps = 0x%08x\n", vf->driver_caps); + dev_info(dev, "\tvf_caps = 0x%08lx\n", vf->vf_caps); + dev_info(dev, "\tvf_states:\n"); + if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) + dev_info(dev, "\t\tICE_VF_STATE_INIT\n"); + if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + dev_info(dev, "\t\tICE_VF_STATE_ACTIVE\n"); + if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) + dev_info(dev, "\t\tICE_VF_STATE_QS_ENA\n"); + if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) + dev_info(dev, "\t\tICE_VF_STATE_MC_PROMISC\n"); + if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) + dev_info(dev, "\t\tICE_VF_STATE_UC_PROMISC\n"); + dev_info(dev, "\tvsi = %p, vsi->idx = %d, vsi->vsi_num = %d\n", + vsi, vsi->idx, vsi->vsi_num); + dev_info(dev, "\tlan_vsi_idx = %d\n", vf->lan_vsi_idx); + dev_info(dev, "\tlan_vsi_num = %d\n", vf->lan_vsi_num); + dev_info(dev, "\tnum_mac = %d\n", vf->num_mac); + dev_info(dev, "\tdev_lan_addr = %pM\n", &vf->dev_lan_addr.addr[0]); + dev_info(dev, "\thw_lan_addr = %pM\n", &vf->hw_lan_addr.addr[0]); + dev_info(dev, "\tnum_req_qs = %d\n", vf->num_req_qs); + dev_info(dev, "\trxq_ena = 0x%lx\n", *vf->rxq_ena); + dev_info(dev, "\ttxq_ena = 0x%lx\n", *vf->txq_ena); + dev_info(dev, "\tPort VLAN status: %s\n", + ice_vf_is_port_vlan_ena(vf) ? "enabled" : "disabled"); + dev_info(dev, "\t\tPort VLAN ID = %d\n", ice_vf_get_port_vlan_id(vf)); + dev_info(dev, "\t\tQoS = %d\n", ice_vf_get_port_vlan_prio(vf)); + dev_info(dev, "\t\tTPID = 0x%x", ice_vf_get_port_vlan_tpid(vf)); + dev_info(dev, "\tpf_set_mac = %s\n", vf->pf_set_mac ? "true" : "false"); + dev_info(dev, "\ttrusted = %s\n", vf->trusted ? "true" : "false"); + dev_info(dev, "\tspoofchk = %s\n", vf->spoofchk ? "true" : "false"); +#ifdef HAVE_NDO_SET_VF_LINK_STATE + dev_info(dev, "\tlink_forced = %s, link_up (only valid when link_forced is true) = %s\n", + vf->link_forced ? "true" : "false", + vf->link_up ? "true" : "false"); +#endif + dev_info(dev, "\tmax_tx_rate = %d\n", vf->max_tx_rate); + dev_info(dev, "\tmin_tx_rate = %d\n", vf->min_tx_rate); + dev_info(dev, "\tmdd_rx_events = %u\n", vf->mdd_rx_events.count); + dev_info(dev, "\tmdd_tx_events = %u\n", vf->mdd_tx_events.count); + dev_info(dev, "\tfirst_vector_idx = %d\n", vf->first_vector_idx); + dev_info(dev, "\tvf_sw_id = %p\n", vf->vf_sw_id); + dev_info(dev, "\tadq_enabled = %s\n", + vf->adq_enabled ? "true" : "false"); + dev_info(dev, "\tadq_fltr_ena = %s\n", + vf->adq_fltr_ena ? 
"true" : "false"); + dev_info(dev, "\tnum_tc = %u\n", vf->num_tc); + dev_info(dev, "\tnum_dmac_chnl_fltrs = %u\n", vf->num_dmac_chnl_fltrs); +} + +void ice_dump_all_vfs(struct ice_pf *pf) +{ + struct ice_vf *vf; + unsigned int bkt; + + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) + ice_dump_vf(vf); + mutex_unlock(&pf->vfs.table_lock); +} + +/** + * ice_get_vf_port_info - populate the iidc_vf_port_info structure + * @pf: ptr to the struct ice_pf + * @vf_id: VF ID used to populate the iidc_vf_port_info + * @vf_port_info: structure to populate with the VF's port information + */ +int ice_get_vf_port_info(struct ice_pf *pf, u16 vf_id, + struct iidc_vf_port_info *vf_port_info) +{ + struct ice_vf *vf; + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) { + dev_err(ice_pf_to_dev(pf), "Invalid VF ID %u\n", vf_id); + return -EINVAL; + } + + /* don't allow query if VF and/or PF are in reset */ + if (ice_is_vf_disabled(vf)) { + ice_put_vf(vf); + return -EAGAIN; + } + + vf_port_info->vf_id = vf->vf_id; + vf_port_info->vport_id = vf->lan_vsi_num; + if (ice_vf_is_port_vlan_ena(vf)) { + vf_port_info->port_vlan_id = ice_vf_get_port_vlan_id(vf); + vf_port_info->port_vlan_tpid = ice_vf_get_port_vlan_tpid(vf); + } + + ice_put_vf(vf); + return 0; } diff --git a/drivers/thirdparty/ice/ice_sriov.h b/drivers/thirdparty/ice/ice_sriov.h index 15adb2e5aaf2..615a767d83ec 100644 --- a/drivers/thirdparty/ice/ice_sriov.h +++ b/drivers/thirdparty/ice/ice_sriov.h @@ -3,50 +3,234 @@ #ifndef _ICE_SRIOV_H_ #define _ICE_SRIOV_H_ +#include "ice_vf_lib.h" +#include "ice_virtchnl.h" -#include "ice_type.h" -#include "ice_controlq.h" +/* Static VF transaction/status register def */ +#define VF_DEVICE_STATUS 0xAA +#define VF_TRANS_PENDING_M 0x20 -/* Defining the mailbox message threshold as 63 asynchronous - * pending messages. Normal VF functionality does not require - * sending more than 63 asynchronous pending message. 
+/* wait defines for polling PF_PCI_CIAD register status */ +#define ICE_PCI_CIAD_WAIT_COUNT 100 +#define ICE_PCI_CIAD_WAIT_DELAY_US 1 + +#define ICE_MIN_QS_PER_VF 1 +#define ICE_NONQ_VECS_VF 1 + +#define ICE_NUM_VF_MSIX_MAX 65 +#define ICE_NUM_VF_MSIX_LARGE 33 +#define ICE_NUM_VF_MSIX_MED 17 +#define ICE_NUM_VF_MSIX_SMALL 5 +#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3 +#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) + +#define ICE_MAX_VF_RESET_TRIES 40 +#define ICE_MAX_VF_RESET_SLEEP_MS 20 + +/** + * ice_vf_chnl_dmac_fltr_cnt - number of dmac based channel filters + * @vf: pointer to the VF info */ -#define ICE_ASYNC_VF_MSG_THRESHOLD 63 +static inline u16 ice_vf_chnl_dmac_fltr_cnt(struct ice_vf *vf) +{ + return vf->num_dmac_chnl_fltrs; +} #ifdef CONFIG_PCI_IOV -enum ice_status -ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, - u8 *msg, u16 msglen, struct ice_sq_cd *cd); +int +ice_get_vf_port_info(struct ice_pf *pf, u16 vf_id, + struct iidc_vf_port_info *vf_port_info); +void ice_dump_all_vfs(struct ice_pf *pf); +void ice_process_vflr_event(struct ice_pf *pf); +int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); +int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); +int +ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); +void ice_free_vfs(struct ice_pf *pf); +void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); -u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); -enum ice_status -ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, - u16 vf_id, bool *is_mal_vf); -enum ice_status -ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, - u16 bitmap_len, u16 vf_id); -enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count); -void ice_mbx_deinit_snapshot(struct ice_hw *hw); -enum ice_status -ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, - u16 bitmap_len, u16 vf_id, bool *report_malvf); +/* VF configuration related iplink handlers */ +void ice_restore_all_vfs_msi_state(struct pci_dev *pdev); +bool +ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, + u16 num_msg_proc, u16 num_msg_pending); + +#ifdef IFLA_VF_VLAN_INFO_MAX +int +ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, + __be16 vlan_proto); +#else +int +ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos); +#endif + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +int +ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate); +#else +int ice_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); +#endif + +#ifdef HAVE_NDO_SET_VF_LINK_STATE +int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); +#endif + +int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); + +int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector, + u8 tc); + +#ifdef HAVE_VF_STATS +int +ice_get_vf_stats(struct net_device *netdev, int vf_id, + struct ifla_vf_stats *vf_stats); +#endif /* HAVE_VF_STATS */ +void +ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); +void ice_print_vfs_mdd_events(struct ice_pf *pf); +void ice_print_vf_rx_mdd_event(struct ice_vf *vf); +bool ice_vc_validate_pattern(struct ice_vf *vf, + struct virtchnl_proto_hdrs *proto); +bool 
ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id); #else /* CONFIG_PCI_IOV */ -static inline enum ice_status -ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw, - u16 __always_unused vfid, u32 __always_unused v_opcode, - u32 __always_unused v_retval, u8 __always_unused *msg, - u16 __always_unused msglen, - struct ice_sq_cd __always_unused *cd) +static inline void ice_dump_all_vfs(struct ice_pf *pf) { } +static inline void ice_process_vflr_event(struct ice_pf *pf) { } +static inline void ice_free_vfs(struct ice_pf *pf) { } +static inline void +ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { } +static inline void +ice_vf_lan_overflow_event(struct ice_pf *pf, + struct ice_rq_event_info *event) { } +static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { } +static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { } +static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { } +static inline int +ice_get_vf_port_info(struct ice_pf __always_unused *pf, + u16 __always_unused vf_id, + struct iidc_vf_port_info __always_unused *vf_port_info) +{ + return -EOPNOTSUPP; +} + +static inline bool +ice_is_malicious_vf(struct ice_pf __always_unused *pf, + struct ice_rq_event_info __always_unused *event, + u16 __always_unused num_msg_proc, + u16 __always_unused num_msg_pending) +{ + return false; +} + +static inline int +ice_sriov_configure(struct pci_dev __always_unused *pdev, + int __always_unused num_vfs) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_mac(struct net_device __always_unused *netdev, + int __always_unused vf_id, u8 __always_unused *mac) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_get_vf_cfg(struct net_device __always_unused *netdev, + int __always_unused vf_id, + struct ifla_vf_info __always_unused *ivi) +{ + return -EOPNOTSUPP; +} + +#ifdef HAVE_NDO_SET_VF_TRUST +static inline int +ice_set_vf_trust(struct net_device __always_unused *netdev, + int __always_unused vf_id, bool __always_unused trusted) +{ + return -EOPNOTSUPP; +} +#endif /* HAVE_NDO_SET_VF_TRUST */ + +#ifdef IFLA_VF_VLAN_INFO_MAX +static inline int +ice_set_vf_port_vlan(struct net_device __always_unused *netdev, + int __always_unused vf_id, u16 __always_unused vid, + u8 __always_unused qos, __be16 __always_unused v_proto) +{ + return -EOPNOTSUPP; +} +#else +static inline int +ice_set_vf_port_vlan(struct net_device __always_unused *netdev, + int __always_unused vf_id, u16 __always_unused vid, + u8 __always_unused qos) +{ + return -EOPNOTSUPP; +} +#endif /* IFLA_VF_VLAN_INFO_MAX */ + +static inline int +ice_set_vf_spoofchk(struct net_device __always_unused *netdev, + int __always_unused vf_id, bool __always_unused ena) +{ + return -EOPNOTSUPP; +} + +#ifdef HAVE_NDO_SET_VF_LINK_STATE +static inline int +ice_set_vf_link_state(struct net_device __always_unused *netdev, + int __always_unused vf_id, int __always_unused link_state) +{ + return -EOPNOTSUPP; +} +#endif /* HAVE_NDO_SET_VF_LINK_STATE */ + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +static inline int +ice_set_vf_bw(struct net_device __always_unused *netdev, + int __always_unused vf_id, int __always_unused min_tx_rate, + int __always_unused max_tx_rate) +#else +static inline int +ice_set_vf_bw(struct net_device __always_unused *netdev, + int __always_unused vf_id, int __always_unused max_tx_rate) +#endif +{ + return -EOPNOTSUPP; +} + +static inline int +ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf, + struct ice_q_vector __always_unused *q_vector, + u8 __always_unused tc) { 
return 0; } -static inline u32 -ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support, - u16 __always_unused link_speed) +#ifdef HAVE_VF_STATS +static inline int +ice_get_vf_stats(struct net_device __always_unused *netdev, + int __always_unused vf_id, + struct ifla_vf_stats __always_unused *vf_stats) { - return 0; + return -EOPNOTSUPP; } - +#endif /* HAVE_VF_STATS */ #endif /* CONFIG_PCI_IOV */ + +static inline u16 ice_abs_vf_id(struct ice_hw *hw, u16 rel_vf_id) +{ + return rel_vf_id + hw->func_caps.vf_base_id; +} + +static inline u16 ice_rel_vf_id(struct ice_hw *hw, u16 abs_vf_id) +{ + return abs_vf_id - hw->func_caps.vf_base_id; +} #endif /* _ICE_SRIOV_H_ */ diff --git a/drivers/thirdparty/ice/ice_status.h b/drivers/thirdparty/ice/ice_status.h deleted file mode 100644 index aa279abfe32a..000000000000 --- a/drivers/thirdparty/ice/ice_status.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (C) 2018-2021, Intel Corporation. */ - -#ifndef _ICE_STATUS_H_ -#define _ICE_STATUS_H_ - -/* Error Codes */ -enum ice_status { - ICE_SUCCESS = 0, - - /* Generic codes : Range -1..-49 */ - ICE_ERR_PARAM = -1, - ICE_ERR_NOT_IMPL = -2, - ICE_ERR_NOT_READY = -3, - ICE_ERR_NOT_SUPPORTED = -4, - ICE_ERR_BAD_PTR = -5, - ICE_ERR_INVAL_SIZE = -6, - ICE_ERR_DEVICE_NOT_SUPPORTED = -8, - ICE_ERR_RESET_FAILED = -9, - ICE_ERR_FW_API_VER = -10, - ICE_ERR_NO_MEMORY = -11, - ICE_ERR_CFG = -12, - ICE_ERR_OUT_OF_RANGE = -13, - ICE_ERR_ALREADY_EXISTS = -14, - ICE_ERR_DOES_NOT_EXIST = -15, - ICE_ERR_IN_USE = -16, - ICE_ERR_MAX_LIMIT = -17, - ICE_ERR_RESET_ONGOING = -18, - ICE_ERR_HW_TABLE = -19, - ICE_ERR_FW_DDP_MISMATCH = -20, - - /* NVM specific error codes: Range -50..-59 */ - ICE_ERR_NVM = -50, - ICE_ERR_NVM_CHECKSUM = -51, - ICE_ERR_BUF_TOO_SHORT = -52, - ICE_ERR_NVM_BLANK_MODE = -53, - - /* ARQ/ASQ specific error codes. Range -100..-109 */ - ICE_ERR_AQ_ERROR = -100, - ICE_ERR_AQ_TIMEOUT = -101, - ICE_ERR_AQ_FULL = -102, - ICE_ERR_AQ_NO_WORK = -103, - ICE_ERR_AQ_EMPTY = -104, - ICE_ERR_AQ_FW_CRITICAL = -105, -}; - -#endif /* _ICE_STATUS_H_ */ diff --git a/drivers/thirdparty/ice/ice_switch.c b/drivers/thirdparty/ice/ice_switch.c index c0e622d13893..ac3091509a9a 100644 --- a/drivers/thirdparty/ice/ice_switch.c +++ b/drivers/thirdparty/ice/ice_switch.c @@ -1,15 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2018-2021, Intel Corporation. */ +#include "ice_common.h" #include "ice_switch.h" #include "ice_flex_type.h" #include "ice_flow.h" - #define ICE_ETH_DA_OFFSET 0 #define ICE_ETH_ETHTYPE_OFFSET 12 #define ICE_ETH_VLAN_TCI_OFFSET 14 #define ICE_MAX_VLAN_ID 0xFFF +#define ICE_IPV6_ETHER_ID 0x86DD +#define ICE_PPP_IPV6_PROTO_ID 0x0057 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem * struct to configure any switch filter rules. 
@@ -30,18 +32,13 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, 0x2, 0, 0, 0, 0, 0, 0x81, 0, 0, 0}; - -struct ice_dummy_pkt_offsets { - enum ice_protocol_type type; - u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */ -}; - static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, { ICE_NVGRE, 34 }, { ICE_MAC_IL, 42 }, + { ICE_ETYPE_IL, 54 }, { ICE_IPV4_IL, 56 }, { ICE_TCP_IL, 76 }, { ICE_PROTOCOL_LAST, 0 }, @@ -66,7 +63,8 @@ static const u8 dummy_gre_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_IL 54 */ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */ 0x00, 0x00, 0x00, 0x00, @@ -87,6 +85,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = { { ICE_IPV4_OFOS, 14 }, { ICE_NVGRE, 34 }, { ICE_MAC_IL, 42 }, + { ICE_ETYPE_IL, 54 }, { ICE_IPV4_IL, 56 }, { ICE_UDP_ILOS, 76 }, { ICE_PROTOCOL_LAST, 0 }, @@ -111,7 +110,8 @@ static const u8 dummy_gre_udp_packet[] = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_IL 54 */ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */ 0x00, 0x00, 0x00, 0x00, @@ -132,6 +132,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { { ICE_GENEVE, 42 }, { ICE_VXLAN_GPE, 42 }, { ICE_MAC_IL, 50 }, + { ICE_ETYPE_IL, 62 }, { ICE_IPV4_IL, 64 }, { ICE_TCP_IL, 84 }, { ICE_PROTOCOL_LAST, 0 }, @@ -159,7 +160,8 @@ static const u8 dummy_udp_tun_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_IL 62*/ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */ 0x00, 0x01, 0x00, 0x00, @@ -183,6 +185,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = { { ICE_GENEVE, 42 }, { ICE_VXLAN_GPE, 42 }, { ICE_MAC_IL, 50 }, + { ICE_ETYPE_IL, 62 }, { ICE_IPV4_IL, 64 }, { ICE_UDP_ILOS, 84 }, { ICE_PROTOCOL_LAST, 0 }, @@ -210,7 +213,8 @@ static const u8 dummy_udp_tun_udp_packet[] = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_IL 62 */ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */ 0x00, 0x01, 0x00, 0x00, @@ -222,6 +226,224 @@ static const u8 dummy_udp_tun_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; +static const struct ice_dummy_pkt_offsets +dummy_gre_ipv6_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_NVGRE, 34 }, + { ICE_MAC_IL, 42 }, + { ICE_ETYPE_IL, 54 }, + { ICE_IPV6_IL, 56 }, + { ICE_TCP_IL, 96 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_ipv6_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x2F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xdd, /* ICE_ETYPE_IL 54 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */ + 0x00, 0x08, 0x06, 0x40, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x02, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00 +}; + +static const struct ice_dummy_pkt_offsets +dummy_gre_ipv6_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_NVGRE, 34 }, + { ICE_MAC_IL, 42 }, + { ICE_ETYPE_IL, 54 }, + { ICE_IPV6_IL, 56 }, + { ICE_UDP_ILOS, 96 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_ipv6_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x2F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xdd, /* ICE_ETYPE_IL 54 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */ + 0x00, 0x08, 0x11, 0x40, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */ + 0x00, 0x08, 0x00, 0x00, +}; + +static const struct ice_dummy_pkt_offsets +dummy_udp_tun_ipv6_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_VXLAN, 42 }, + { ICE_GENEVE, 42 }, + { ICE_VXLAN_GPE, 42 }, + { ICE_MAC_IL, 50 }, + { ICE_ETYPE_IL, 62 }, + { ICE_IPV6_IL, 64 }, + { ICE_TCP_IL, 104 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_udp_tun_ipv6_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x40, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */ + 0x00, 0x5a, 0x00, 0x00, + + 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xdd, /* ICE_ETYPE_IL 62 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */ + 0x00, 0x08, 0x06, 0x40, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x02, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00 +}; + +static const struct ice_dummy_pkt_offsets +dummy_udp_tun_ipv6_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_VXLAN, 42 }, + { ICE_GENEVE, 42 }, + { ICE_VXLAN_GPE, 42 }, + { ICE_MAC_IL, 50 }, + { ICE_ETYPE_IL, 62 }, + { ICE_IPV6_IL, 64 }, + { ICE_UDP_ILOS, 104 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_udp_tun_ipv6_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 
0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */ + 0x00, 0x4e, 0x00, 0x00, + + 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xdd, /* ICE_ETYPE_IL 62 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */ + 0x00, 0x08, 0x11, 0x40, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */ + 0x00, 0x08, 0x00, 0x00, +}; + /* offset info for MAC + IPv4 + UDP dummy packet */ static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = { { ICE_MAC_OFOS, 0 }, @@ -254,8 +476,8 @@ static const u8 dummy_udp_packet[] = { /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */ static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = { { ICE_MAC_OFOS, 0 }, - { ICE_ETYPE_OL, 12 }, - { ICE_VLAN_OFOS, 14 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, { ICE_IPV4_OFOS, 18 }, { ICE_UDP_ILOS, 38 }, { ICE_PROTOCOL_LAST, 0 }, @@ -267,9 +489,9 @@ static const u8 dummy_vlan_udp_packet[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x81, 0x00, /* ICE_ETYPE_OL 12 */ + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ - 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */ + 0x08, 0x00, /* ICE_ETYPE_OL 16 */ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */ 0x00, 0x01, 0x00, 0x00, @@ -318,8 +540,8 @@ static const u8 dummy_tcp_packet[] = { /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */ static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = { { ICE_MAC_OFOS, 0 }, - { ICE_ETYPE_OL, 12 }, - { ICE_VLAN_OFOS, 14 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, { ICE_IPV4_OFOS, 18 }, { ICE_TCP_IL, 38 }, { ICE_PROTOCOL_LAST, 0 }, @@ -331,9 +553,9 @@ static const u8 dummy_vlan_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x81, 0x00, /* ICE_ETYPE_OL 12 */ + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ - 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */ + 0x08, 0x00, /* ICE_ETYPE_OL 16 */ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */ 0x00, 0x01, 0x00, 0x00, @@ -389,8 +611,8 @@ static const u8 dummy_tcp_ipv6_packet[] = { static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_ipv6_packet_offsets[] = { { ICE_MAC_OFOS, 0 }, - { ICE_ETYPE_OL, 12 }, - { ICE_VLAN_OFOS, 14 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, { ICE_IPV6_OFOS, 18 }, { ICE_TCP_IL, 58 }, { ICE_PROTOCOL_LAST, 0 }, @@ -402,9 +624,9 @@ static const u8 dummy_vlan_tcp_ipv6_packet[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x81, 0x00, /* ICE_ETYPE_OL 12 */ + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ - 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */ + 0x86, 0xDD, /* ICE_ETYPE_OL 16 */ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */ @@ -467,8 +689,8 @@ static const u8 dummy_udp_ipv6_packet[] = { static const struct ice_dummy_pkt_offsets dummy_vlan_udp_ipv6_packet_offsets[] = { { ICE_MAC_OFOS, 0 }, - { ICE_ETYPE_OL, 12 }, - { ICE_VLAN_OFOS, 14 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, { ICE_IPV6_OFOS, 18 }, { ICE_UDP_ILOS, 58 }, { ICE_PROTOCOL_LAST, 0 }, @@ -480,9 +702,9 @@ static const u8 dummy_vlan_udp_ipv6_packet[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x81, 
0x00, /* ICE_ETYPE_OL 12 */ + 0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */ - 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */ + 0x86, 0xDD, /* ICE_ETYPE_OL 16 */ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */ 0x00, 0x08, 0x11, 0x00, /* Next header UDP */ @@ -1087,6 +1309,416 @@ static const u8 dummy_ipv6_gtpu_ipv6_packet[] = { 0x00, 0x00, }; +static const +struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_GTP_NO_PAY, 42 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const +struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_UDP_OF, 54 }, + { ICE_GTP_NO_PAY, 62 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_ipv6_gtp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x86, 0xdd, + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */ + 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */ + 0x00, 0x00, 0x00, 0x00, + + 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, +}; + +static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_EX, 12 }, + { ICE_VLAN_IN, 16 }, + { ICE_ETYPE_OL, 20 }, + { ICE_IPV4_OFOS, 22 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_qinq_ipv4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */ + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */ + 0x08, 0x00, /* ICE_ETYPE_OL 20 */ + + 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 22 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +static const +struct ice_dummy_pkt_offsets dummy_qinq_ipv4_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_EX, 12 }, + { ICE_VLAN_IN, 16 }, + { ICE_ETYPE_OL, 20 }, + { ICE_IPV4_OFOS, 22 }, + { ICE_UDP_ILOS, 42 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_qinq_ipv4_udp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */ + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */ + 0x08, 0x00, /* ICE_ETYPE_OL 20 */ + + 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +static const +struct ice_dummy_pkt_offsets dummy_qinq_ipv4_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_EX, 12 }, + { ICE_VLAN_IN, 16 }, + { ICE_ETYPE_OL, 20 }, + { ICE_IPV4_OFOS, 22 }, + { ICE_TCP_IL, 42 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_qinq_ipv4_tcp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */ + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */ + 0x08, 0x00, /* ICE_ETYPE_OL 20 */ + + 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */ + 0x00, 0x01, 
0x00, 0x00, + 0x00, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_EX, 12 }, + { ICE_VLAN_IN, 16 }, + { ICE_ETYPE_OL, 20 }, + { ICE_IPV6_OFOS, 22 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_qinq_ipv6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */ + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */ + 0x86, 0xDD, /* ICE_ETYPE_OL 20 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */ + 0x00, 0x00, 0x3b, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +static const +struct ice_dummy_pkt_offsets dummy_qinq_ipv6_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_EX, 12 }, + { ICE_VLAN_IN, 16 }, + { ICE_ETYPE_OL, 20 }, + { ICE_IPV6_OFOS, 22 }, + { ICE_UDP_ILOS, 62 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_qinq_ipv6_udp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */ + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */ + 0x86, 0xDD, /* ICE_ETYPE_OL 20 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */ + 0x00, 0x08, 0x11, 0x00, /* Next header UDP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +static const +struct ice_dummy_pkt_offsets dummy_qinq_ipv6_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_EX, 12 }, + { ICE_VLAN_IN, 16 }, + { ICE_ETYPE_OL, 20 }, + { ICE_IPV6_OFOS, 22 }, + { ICE_TCP_IL, 62 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_qinq_ipv6_tcp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */ + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */ + 0x86, 0xDD, /* ICE_ETYPE_OL 20 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */ + 0x00, 0x14, 0x06, 0x00, /* Next header TCP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +static const +struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, + { ICE_PPPOE, 18 }, + { ICE_IPV4_OFOS, 26 }, + { ICE_TCP_IL, 46 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_pppoe_ipv4_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ + + 0x88, 0x64, /* ICE_ETYPE_OL 16 */ + + 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */ + 0x00, 0x16, + + 0x00, 0x21, /* PPP Link Layer 24 */ + + 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ +}; + +static const +struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, + { ICE_PPPOE, 18 }, + { ICE_IPV4_OFOS, 26 }, + { ICE_UDP_ILOS, 46 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_pppoe_ipv4_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ + + 0x88, 0x64, /* ICE_ETYPE_OL 16 */ + + 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */ + 0x00, 0x16, + + 0x00, 0x21, /* PPP Link Layer 24 */ + + 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ +}; + +static const +struct ice_dummy_pkt_offsets dummy_pppoe_ipv6_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, + { ICE_PPPOE, 18 }, + { ICE_IPV6_OFOS, 26 }, + { ICE_TCP_IL, 66 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_pppoe_ipv6_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ + + 0x88, 0x64, /* ICE_ETYPE_OL 16 */ + + 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */ + 0x00, 0x2a, + + 0x00, 0x57, /* PPP Link Layer 24 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */ + 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ +}; + +static const +struct ice_dummy_pkt_offsets dummy_pppoe_ipv6_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, + { ICE_PPPOE, 18 }, + { ICE_IPV6_OFOS, 26 }, + { ICE_UDP_ILOS, 66 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_pppoe_ipv6_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ + + 0x88, 0x64, /* ICE_ETYPE_OL 16 */ + + 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */ + 0x00, 0x2a, + + 0x00, 0x57, /* PPP Link Layer 24 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */ + 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* 
ICE_UDP_ILOS 66 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ +}; /* this is a recipe to profile association bitmap */ static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES], @@ -1111,6 +1743,93 @@ static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, recp->res_idxs); } +/** + * ice_get_tun_type_for_recipe - get tunnel type for the recipe + * @rid: recipe ID that we are populating + * @vlan: flag of vlan protocol + */ +static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan) +{ + u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27}; + u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33}; + u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40}; + u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9}; + enum ice_sw_tunnel_type tun_type; + u16 i, j, profile_num = 0; + bool non_tun_valid = false; + bool pppoe_valid = false; + bool vxlan_valid = false; + bool gre_valid = false; + bool gtp_valid = false; + bool flag_valid = false; + + for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) { + if (!test_bit(j, recipe_to_profile[rid])) + continue; + else + profile_num++; + + for (i = 0; i < 12; i++) { + if (gre_profile[i] == j) + gre_valid = true; + } + + for (i = 0; i < 12; i++) { + if (vxlan_profile[i] == j) + vxlan_valid = true; + } + + for (i = 0; i < 7; i++) { + if (pppoe_profile[i] == j) + pppoe_valid = true; + } + + for (i = 0; i < 6; i++) { + if (non_tun_profile[i] == j) + non_tun_valid = true; + } + } + + if (!non_tun_valid && vxlan_valid) + tun_type = ICE_SW_TUN_VXLAN; + else if (!non_tun_valid && gre_valid) + tun_type = ICE_SW_TUN_NVGRE; + else if (non_tun_valid && + (vxlan_valid || gre_valid || gtp_valid || pppoe_valid)) + tun_type = ICE_SW_TUN_AND_NON_TUN; + else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid && + !pppoe_valid) + tun_type = ICE_NON_TUN; + else + tun_type = ICE_NON_TUN; + + if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) { + for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) { + if (test_bit(j, recipe_to_profile[rid])) { + switch (j) { + case ICE_PROFID_IPV4_TCP: + tun_type = ICE_SW_IPV4_TCP; + break; + case ICE_PROFID_IPV4_UDP: + tun_type = ICE_SW_IPV4_UDP; + break; + case ICE_PROFID_IPV6_TCP: + tun_type = ICE_SW_IPV6_TCP; + break; + case ICE_PROFID_IPV6_UDP: + tun_type = ICE_SW_IPV6_UDP; + break; + default: + break; + } + + return tun_type; + } + } + } + + return tun_type; +} /** * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries @@ -1123,7 +1842,7 @@ static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, * bookkeeping so that we have a current list of all the recipes that are * programmed in the firmware. 
*/ -static enum ice_status +static int ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, bool *refresh_required) { @@ -1131,9 +1850,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, struct ice_aqc_recipe_data_elem *tmp; u16 num_recps = ICE_MAX_NUM_RECIPES; struct ice_prot_lkup_ext *lkup_exts; - enum ice_status status; u8 fv_word_idx = 0; + bool vlan = false; u16 sub_recps; + int status; bitmap_zero(result_bm, ICE_MAX_FV_WORDS); @@ -1141,7 +1861,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, tmp = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); if (!tmp) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; tmp[0].recipe_indx = rid; status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL); @@ -1178,7 +1898,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry), GFP_KERNEL); if (!rg_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll; } @@ -1191,8 +1911,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, result_bm); /* get the first profile that is associated with rid */ - prof = find_first_bit(recipe_to_profile[idx], - ICE_MAX_NUM_PROFILES); + prof = (u8) find_first_bit(recipe_to_profile[idx], + ICE_MAX_NUM_PROFILES); for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { u8 lkup_indx = root_bufs.content.lkup_indx[i + 1]; @@ -1220,6 +1940,9 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, lkup_exts->fv_words[fv_word_idx].off = off; lkup_exts->field_mask[fv_word_idx] = rg_entry->fv_mask[i]; + if (prot == ICE_META_DATA_ID_HW && + off == ICE_TUN_FLAG_MDID_OFF(1)) + vlan = true; fv_word_idx++; } /* populate rg_list with the data from the child entry of this @@ -1254,6 +1977,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, lkup_exts->n_val_words = fv_word_idx; recps[rid].big_recp = (num_recps > 1); recps[rid].n_grp_count = (u8)num_recps; + recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan); recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp, recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), GFP_KERNEL); @@ -1296,6 +2020,9 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw) } } +static bool +ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle); + /** * ice_init_def_sw_recp - initialize the recipe book keeping tables * @hw: pointer to the HW struct @@ -1304,7 +2031,7 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw) * Allocate memory for the entire recipe table and initialize the structures/ * entries corresponding to basic recipes. */ -enum ice_status +int ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list) { struct ice_sw_recipe *recps; @@ -1313,7 +2040,7 @@ ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list) recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, sizeof(*recps), GFP_KERNEL); if (!recps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { recps[i].root_rid = i; @@ -1353,14 +2080,14 @@ ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list) * in response buffer. The caller of this function to use *num_elems while * parsing the response buffer. 
*/ -enum ice_status +int ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf, u16 buf_size, u16 *req_desc, u16 *num_elems, struct ice_sq_cd *cd) { struct ice_aqc_get_sw_cfg *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg); cmd = &desc.params.get_sw_conf; @@ -1379,18 +2106,18 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf, * ice_dump_sw_cfg - get and print switch config as seen by firmware * @hw: ice hardware struct */ -enum ice_status ice_dump_sw_cfg(struct ice_hw *hw) +int ice_dump_sw_cfg(struct ice_hw *hw) { struct ice_aqc_get_sw_cfg_resp_elem *rbuf; - enum ice_status ret; u16 req_desc = 0; u16 num_elems; + int ret; u16 i; rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL); if (!rbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Multiple calls to ice_aq_get_sw_cfg may be required * to get all the switch configuration information. The need @@ -1436,7 +2163,7 @@ enum ice_status ice_dump_sw_cfg(struct ice_hw *hw) default: dev_info(ice_hw_to_dev(hw), "\tincorrect vsi/port type\n"); - ret = ICE_ERR_CFG; + ret = -EIO; break; } @@ -1462,16 +2189,16 @@ enum ice_status ice_dump_sw_cfg(struct ice_hw *hw) * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource * @global_lut_id: output parameter for the RSS global LUT's ID */ -enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id) +int ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id) { struct ice_aqc_alloc_free_res_elem *sw_buf; - enum ice_status status; + int status; u16 buf_len; buf_len = struct_size(sw_buf, elem, 1); sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!sw_buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; sw_buf->num_elems = cpu_to_le16(1); sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH | @@ -1497,16 +2224,16 @@ ice_alloc_global_lut_exit: * @hw: pointer to the HW struct * @global_lut_id: ID of the RSS global LUT to free */ -enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id) +int ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id) { struct ice_aqc_alloc_free_res_elem *sw_buf; u16 buf_len, num_elems = 1; - enum ice_status status; + int status; buf_len = struct_size(sw_buf, elem, num_elems); sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!sw_buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; sw_buf->num_elems = cpu_to_le16(num_elems); sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH); @@ -1531,19 +2258,19 @@ enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id) * * allocates switch resources (SWID and VEB counter) (0x0208) */ -enum ice_status +int ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id, u16 *counter_id) { struct ice_aqc_alloc_free_res_elem *sw_buf; struct ice_aqc_res_elem *sw_ele; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(sw_buf, elem, 1); sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!sw_buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Prepare buffer for switch ID. 
* The number of resource entries in buffer is passed as 1 since only a @@ -1574,7 +2301,7 @@ ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id, counter_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!counter_buf) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_alloc_sw_exit; } @@ -1615,16 +2342,16 @@ ice_alloc_sw_exit: * releasing other resources even after it encounters error. * The error code returned is the last error it encountered. */ -enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id) +int ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id) { struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf; - enum ice_status status, ret_status; + int status, ret_status; u16 buf_len; buf_len = struct_size(sw_buf, elem, 1); sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!sw_buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Prepare buffer to free for switch ID res. * The number of resource entries in buffer is passed as 1 since only a @@ -1645,7 +2372,7 @@ enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id) counter_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!counter_buf) { devm_kfree(ice_hw_to_dev(hw), sw_buf); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /* The number of resource entries in buffer is passed as 1 since only a @@ -1676,14 +2403,14 @@ enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id) * * Add a VSI context to the hardware (0x0210) */ -static enum ice_status +static int ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *res; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; res = &desc.params.add_update_free_vsi_res; @@ -1720,14 +2447,14 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, * * Free VSI context info from hardware (0x0213) */ -static enum ice_status +static int ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *resp; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; resp = &desc.params.add_update_free_vsi_res; @@ -1755,14 +2482,14 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, * * Update VSI context in the hardware (0x0211) */ -static enum ice_status +static int ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *resp; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; resp = &desc.params.add_update_free_vsi_res; @@ -1902,15 +2629,15 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw) * If this function gets called after reset for existing VSIs then update * with the new HW VSI number in the corresponding VSI handle list entry. 
*/ -enum ice_status +int ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_vsi_ctx *tmp_vsi_ctx; - enum ice_status status; + int status; if (vsi_handle >= ICE_MAX_VSI) - return ICE_ERR_PARAM; + return -EINVAL; status = ice_aq_add_vsi(hw, vsi_ctx, cd); if (status) return status; @@ -1921,7 +2648,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, sizeof(*tmp_vsi_ctx), GFP_KERNEL); if (!tmp_vsi_ctx) { ice_aq_free_vsi(hw, vsi_ctx, false, cd); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } *tmp_vsi_ctx = *vsi_ctx; @@ -1944,14 +2671,14 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, * * Free VSI context info from hardware as well as from VSI handle list */ -enum ice_status +int ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd) { - enum ice_status status; + int status; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd); if (!status) @@ -1968,12 +2695,12 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, * * Update VSI context in the hardware */ -enum ice_status +int ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); return ice_aq_update_vsi(hw, vsi_ctx, cd); } @@ -1984,21 +2711,39 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, * @vsi_handle: VSI SW index * @enable: boolean for enable/disable */ -enum ice_status +int ice_cfg_iwarp_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable) { - struct ice_vsi_ctx *ctx; + struct ice_vsi_ctx *ctx, *cached_ctx; + int status; - ctx = ice_get_vsi_ctx(hw, vsi_handle); + cached_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!cached_ctx) + return -ENOENT; + + ctx = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*ctx), GFP_KERNEL); if (!ctx) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOMEM; + + ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss; + ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc; + ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags; + + ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); if (enable) ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; else ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; - return ice_update_vsi(hw, vsi_handle, ctx, NULL); + status = ice_update_vsi(hw, vsi_handle, ctx, NULL); + if (!status) { + cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags; + cached_ctx->info.valid_sections |= ctx->info.valid_sections; + } + + devm_kfree(ice_hw_to_dev(hw), ctx); + return status; } /** @@ -2009,14 +2754,14 @@ ice_cfg_iwarp_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable) * * Get VSI context info from hardware (0x0212) */ -enum ice_status +int ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aqc_get_vsi_resp *resp; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; resp = &desc.params.get_vsi_resp; @@ -2050,29 +2795,29 @@ ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, * * Add/Update Mirror Rule (0x260). 
*/ -enum ice_status +int ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi, u16 count, struct ice_mir_rule_buf *mr_buf, struct ice_sq_cd *cd, u16 *rule_id) { struct ice_aqc_add_update_mir_rule *cmd; struct ice_aq_desc desc; - enum ice_status status; __le16 *mr_list = NULL; u16 buf_size = 0; + int status; switch (rule_type) { case ICE_AQC_RULE_TYPE_VPORT_INGRESS: case ICE_AQC_RULE_TYPE_VPORT_EGRESS: /* Make sure count and mr_buf are set for these rule_types */ if (!(count && mr_buf)) - return ICE_ERR_PARAM; + return -EINVAL; buf_size = count * sizeof(__le16); mr_list = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!mr_list) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; break; case ICE_AQC_RULE_TYPE_PPORT_INGRESS: case ICE_AQC_RULE_TYPE_PPORT_EGRESS: @@ -2080,11 +2825,11 @@ ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi, * rule_types */ if (count || mr_buf) - return ICE_ERR_PARAM; + return -EINVAL; break; default: ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type); - return ICE_ERR_OUT_OF_RANGE; + return -EIO; } ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule); @@ -2108,7 +2853,7 @@ ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi, ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n", id); devm_kfree(ice_hw_to_dev(hw), mr_list); - return ICE_ERR_OUT_OF_RANGE; + return -EIO; } /* add VSI to mirror rule */ @@ -2118,6 +2863,8 @@ ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi, else /* remove VSI from mirror rule */ mr_list[i] = cpu_to_le16(id); } + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); } cmd = &desc.params.add_update_rule; @@ -2147,7 +2894,7 @@ ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi, * * Delete Mirror Rule (0x261). 
*/ -enum ice_status +int ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd, struct ice_sq_cd *cd) { @@ -2156,7 +2903,7 @@ ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd, /* rule_id should be in the range 0...63 */ if (rule_id >= ICE_MAX_NUM_MIRROR_RULES) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule); @@ -2179,20 +2926,20 @@ ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd, * * allocates or free a VSI list resource */ -static enum ice_status +static int ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type, enum ice_adminq_opc opc) { struct ice_aqc_alloc_free_res_elem *sw_buf; struct ice_aqc_res_elem *vsi_ele; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(sw_buf, elem, 1); sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!sw_buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; sw_buf->num_elems = cpu_to_le16(1); if (lkup_type == ICE_SW_LKUP_MAC || @@ -2201,13 +2948,20 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN || + lkup_type == ICE_SW_LKUP_DFLT || lkup_type == ICE_SW_LKUP_LAST) { sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP); } else if (lkup_type == ICE_SW_LKUP_VLAN) { - sw_buf->res_type = - cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); + /* Set SHARED bit on alloc calls for LAG functionality */ + if (opc == ice_aqc_opc_alloc_res) + sw_buf->res_type = + cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE | + ICE_AQC_RES_TYPE_FLAG_SHARED); + else + sw_buf->res_type = + cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); } else { - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_aq_alloc_free_vsi_list_exit; } @@ -2237,7 +2991,7 @@ ice_aq_alloc_free_vsi_list_exit: * * Sets the storm control configuration (0x0280) */ -enum ice_status +int ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh, u32 ctl_bitmask) { @@ -2264,12 +3018,12 @@ ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh, * * Gets the storm control configuration (0x0281) */ -enum ice_status +int ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh, u32 *ctl_bitmask) { - enum ice_status status; struct ice_aq_desc desc; + int status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg); @@ -2301,17 +3055,17 @@ ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh, * * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware */ -enum ice_status __maybe_unused +int ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aq_desc desc; - enum ice_status status; + int status; if (opc != ice_aqc_opc_add_sw_rules && opc != ice_aqc_opc_update_sw_rules && opc != ice_aqc_opc_remove_sw_rules) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); @@ -2321,7 +3075,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd); if (opc != ice_aqc_opc_add_sw_rules && hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; return status; } @@ -2335,7 +3089,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 
rule_list_sz, * * Add(0x0290) */ -enum ice_status +int ice_aq_add_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 num_recipes, struct ice_sq_cd *cd) @@ -2372,18 +3126,18 @@ ice_aq_add_recipe(struct ice_hw *hw, * The caller must supply enough space in s_recipe_list to hold all possible * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES. */ -enum ice_status +int ice_aq_get_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd) { struct ice_aqc_add_get_recipe *cmd; struct ice_aq_desc desc; - enum ice_status status; u16 buf_size; + int status; if (*num_recipes != ICE_MAX_NUM_RECIPES) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.add_get_recipe; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe); @@ -2412,18 +3166,18 @@ ice_aq_get_recipe(struct ice_hw *hw, * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update * the pre-existing recipe with the modifications. */ -enum ice_status +int ice_update_recipe_lkup_idx(struct ice_hw *hw, struct ice_update_recipe_lkup_idx_params *params) { struct ice_aqc_recipe_data_elem *rcp_list; u16 num_recps = ICE_MAX_NUM_RECIPES; - enum ice_status status; + int status; rcp_list = devm_kzalloc(ice_hw_to_dev(hw), num_recps * sizeof(*rcp_list), GFP_KERNEL); if (!rcp_list) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* read current recipe list from firmware */ rcp_list->recipe_indx = params->rid; @@ -2466,7 +3220,7 @@ error_out: * @cd: pointer to command details structure or NULL * Recipe to profile association (0x0291) */ -enum ice_status +int ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, struct ice_sq_cd *cd) { @@ -2492,13 +3246,13 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * @cd: pointer to command details structure or NULL * Associate profile ID with given recipe (0x0293) */ -enum ice_status +int ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, struct ice_sq_cd *cd) { struct ice_aqc_recipe_to_profile *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.recipe_to_profile; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile); @@ -2516,16 +3270,16 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * @hw: pointer to the hardware structure * @rid: recipe ID returned as response to AQ call */ -enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid) +int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) { struct ice_aqc_alloc_free_res_elem *sw_buf; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(sw_buf, elem, 1); sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!sw_buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; sw_buf->num_elems = cpu_to_le16(1); sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << @@ -2558,8 +3312,6 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, pi->sw_id = swid; pi->pf_vf_num = pf_vf_num; pi->is_vf = is_vf; - pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; - pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; break; default: ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n"); @@ -2570,13 +3322,13 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, /* ice_get_initial_sw_cfg - Get initial port and default VSI data * @hw: pointer to the hardware structure */ -enum ice_status ice_get_initial_sw_cfg(struct 
ice_hw *hw) +int ice_get_initial_sw_cfg(struct ice_hw *hw) { struct ice_aqc_get_sw_cfg_resp_elem *rbuf; - enum ice_status status; u8 num_total_ports; u16 req_desc = 0; u16 num_elems; + int status; u8 j = 0; u16 i; @@ -2586,7 +3338,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) GFP_KERNEL); if (!rbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Multiple calls to ice_aq_get_sw_cfg may be required * to get all the switch configuration information. The need @@ -2627,7 +3379,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT: if (j == num_total_ports) { ice_debug(hw, ICE_DBG_SW, "more ports than expected\n"); - status = ICE_ERR_CFG; + status = -EIO; goto out; } ice_init_port_info(hw->port_info, @@ -2641,12 +3393,58 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) } } while (req_desc && !status); - out: devm_kfree(ice_hw_to_dev(hw), rbuf); return status; } +/** + * ice_dump_lkup_filters + * @hw: pointer to the hardware structure + * @rule_lock: pointer to lock that's protecting filter list + * @rule_head: pointer to head of filter list + * @lkup: type of switch lookup rule + * + * Helper function that prints all filters for a particular switch + * lookup type + */ +static void ice_dump_lkup_filters(struct ice_hw *hw, struct mutex *rule_lock, + struct list_head *rule_head, + enum ice_sw_lkup_type lkup) +{ + struct ice_fltr_mgmt_list_entry *fm_entry; + char extra_param[128] = {'\0'}; + char mac_addr[ETH_ALEN]; + + memset(mac_addr, 0, ETH_ALEN); + mutex_lock(rule_lock); + list_for_each_entry(fm_entry, rule_head, list_entry) { + struct ice_fltr_info *fi = &fm_entry->fltr_info; + + if (lkup == ICE_SW_LKUP_MAC || lkup == ICE_SW_LKUP_PROMISC) { + ether_addr_copy(mac_addr, fi->l_data.mac.mac_addr); + } else if (lkup == ICE_SW_LKUP_MAC_VLAN) { + ether_addr_copy(mac_addr, + fi->l_data.mac_vlan.mac_addr); + snprintf(extra_param, sizeof(extra_param), + "vlan_id = %d", fi->l_data.mac_vlan.vlan_id); + } else if (lkup == ICE_SW_LKUP_ETHERTYPE_MAC) { + ether_addr_copy(mac_addr, + fi->l_data.ethertype_mac.mac_addr); + snprintf(extra_param, sizeof(extra_param), + "ethertype = %d", + fi->l_data.ethertype_mac.ethertype); + } + + dev_info(ice_hw_to_dev(hw), + "\tmac: %pM, vsi_count = %d, fw_act_flag = %d, lb_en = %d, lan_en = %d, filt_act = %d, filt_rule_id = %d %s\n", + mac_addr, fm_entry->vsi_count, fi->flag, fi->lb_en, + fi->lan_en, fi->fltr_act, fi->fltr_rule_id, + extra_param); + } + mutex_unlock(rule_lock); +} + /** * ice_dump_all_sw_rules * @hw: pointer to the hardware structure @@ -2673,16 +3471,7 @@ ice_dump_all_sw_rules(struct ice_hw *hw, enum ice_sw_lkup_type lkup, /* dump MAC hash list */ dev_info(ice_hw_to_dev(hw), "\tDump MAC hash list of lookup type %d\n", lkup); - mutex_lock(rule_lock); - list_for_each_entry(fm_entry, rule_head, list_entry) { - fi = &fm_entry->fltr_info; - dev_info(ice_hw_to_dev(hw), - "\tmac: %pM, vsi_count = %d, fw_act_flag = %d, lb_en = %d, lan_en = %d, filt_act = %d, filt_rule_id = %d\n", - fi->l_data.mac.mac_addr, fm_entry->vsi_count, - fi->flag, fi->lb_en, fi->lan_en, - fi->fltr_act, fi->fltr_rule_id); - } - mutex_unlock(rule_lock); + ice_dump_lkup_filters(hw, rule_lock, rule_head, lkup); break; case ICE_SW_LKUP_VLAN: /* dump VLAN hash list */ @@ -2706,17 +3495,7 @@ ice_dump_all_sw_rules(struct ice_hw *hw, enum ice_sw_lkup_type lkup, dev_info(ice_hw_to_dev(hw), "\tDump MAC VLAN hash list of lookup type %d\n", lkup); - mutex_lock(rule_lock); - list_for_each_entry(fm_entry, rule_head, list_entry) 
{ - fi = &fm_entry->fltr_info; - dev_info(ice_hw_to_dev(hw), - "\tmac: %pM, vlan_id = %d, vsi_count = %d, fw_act_flag = %d, lb_en = %d, lan_en = %d, filt_act = %d, filt_rule_id = %d\n", - fi->l_data.mac_vlan.mac_addr, - fi->l_data.mac_vlan.vlan_id, - fm_entry->vsi_count, fi->flag, fi->lb_en, - fi->lan_en, fi->fltr_act, fi->fltr_rule_id); - } - mutex_unlock(rule_lock); + ice_dump_lkup_filters(hw, rule_lock, rule_head, lkup); break; case ICE_SW_LKUP_ETHERTYPE: /* dump Ethertype/Ethertype MAC hash list */ @@ -2739,17 +3518,7 @@ ice_dump_all_sw_rules(struct ice_hw *hw, enum ice_sw_lkup_type lkup, dev_info(ice_hw_to_dev(hw), "\tDump Ethertype MAC hash list of lookup type %d\n", lkup); - mutex_lock(rule_lock); - list_for_each_entry(fm_entry, rule_head, list_entry) { - fi = &fm_entry->fltr_info; - dev_info(ice_hw_to_dev(hw), - "\tmac: %pM, ethertype = %d, vsi_count = %d, fw_act_flag = %d, filt_act = %d, lb_en = %d, lan_en = %d, filt_rule_id = %d\n", - fi->l_data.ethertype_mac.mac_addr, - fi->l_data.ethertype_mac.ethertype, - fm_entry->vsi_count, fi->flag, fi->lb_en, - fi->lan_en, fi->fltr_act, fi->fltr_rule_id); - } - mutex_unlock(rule_lock); + ice_dump_lkup_filters(hw, rule_lock, rule_head, lkup); break; case ICE_SW_LKUP_PROMISC: /* dump Promisc mode hash list */ @@ -2758,33 +3527,13 @@ ice_dump_all_sw_rules(struct ice_hw *hw, enum ice_sw_lkup_type lkup, lkup); dev_info(ice_hw_to_dev(hw), "\tNote: Ignore VLAN in case of Promisc only lookup type & ignore MAC in case of Promisc VLAN lookup type\n"); - mutex_lock(rule_lock); - list_for_each_entry(fm_entry, rule_head, list_entry) { - fi = &fm_entry->fltr_info; - dev_info(ice_hw_to_dev(hw), - "\tmac: %pM, vlan_id = %d, vsi_count = %d, fw_act_flag = %d, lb_en = %d, lan_en = %d, filt_act = %d, filt_rule_id = %d\n", - fi->l_data.mac_vlan.mac_addr, - fi->l_data.mac_vlan.vlan_id, - fm_entry->vsi_count, fi->flag, fi->lb_en, - fi->lan_en, fi->fltr_act, fi->fltr_rule_id); - } - mutex_unlock(rule_lock); + ice_dump_lkup_filters(hw, rule_lock, rule_head, lkup); break; case ICE_SW_LKUP_DFLT: - /* dump default VSI filter rule */ - if (hw->port_info->dflt_tx_vsi_num != ICE_DFLT_VSI_INVAL) - dev_info(ice_hw_to_dev(hw), - "\tDefault VSI filter (lookup type %d): tx_vsi_id = %d, filt_act = %d, tx_filt_rule_id = %d\n", - lkup, hw->port_info->dflt_tx_vsi_num, - ICE_FWD_TO_VSI, - hw->port_info->dflt_tx_vsi_rule_id); - - if (hw->port_info->dflt_rx_vsi_num != ICE_DFLT_VSI_INVAL) - dev_info(ice_hw_to_dev(hw), - "\tDefault VSI filter (lookup type %d): rx_vsi_id = %d, filt_act = %d, rx_filt_rule_id = %d\n", - lkup, hw->port_info->dflt_rx_vsi_num, - ICE_FWD_TO_VSI, - hw->port_info->dflt_rx_vsi_rule_id); + dev_info(ice_hw_to_dev(hw), + "\tDump Default VSI's VLAN mode hash list of lookup type %d\n", + lkup); + ice_dump_lkup_filters(hw, rule_lock, rule_head, lkup); break; case ICE_SW_LKUP_PROMISC_VLAN: case ICE_SW_LKUP_LAST: @@ -2794,7 +3543,6 @@ ice_dump_all_sw_rules(struct ice_hw *hw, enum ice_sw_lkup_type lkup, } } - /** * ice_fill_sw_info - Helper function to populate lb_en and lan_en * @hw: pointer to the hardware structure @@ -2948,7 +3696,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, break; case ICE_SW_LKUP_ETHERTYPE_MAC: daddr = f_info->l_data.ethertype_mac.mac_addr; - /* fall-through */ + fallthrough; case ICE_SW_LKUP_ETHERTYPE: off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype); @@ -2959,7 +3707,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, break; case 
ICE_SW_LKUP_PROMISC_VLAN: vlan_id = f_info->l_data.mac_vlan.vlan_id; - /* fall-through */ + fallthrough; case ICE_SW_LKUP_PROMISC: daddr = f_info->l_data.mac_vlan.mac_addr; break; @@ -2991,7 +3739,6 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz); } - /** * ice_dump_sw_rules - Function to dump sw rules * @hw: pointer to the hardware structure @@ -3012,7 +3759,7 @@ void ice_dump_sw_rules(struct ice_hw *hw, enum ice_sw_lkup_type lkup) * Create a large action to hold software marker and update the switch rule * entry pointed by m_ent with newly created large action */ -static enum ice_status +static int ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, u16 sw_marker, u16 l_id) { @@ -3023,14 +3770,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, * 3. GENERIC VALUE action to hold the software marker ID */ const u16 num_lg_acts = 3; - enum ice_status status; u16 lg_act_size; u16 rules_size; + int status; u32 act; u16 id; if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) - return ICE_ERR_PARAM; + return -EINVAL; /* Create two back-to-back switch rules and submit them to the HW using * one memory buffer: @@ -3041,7 +3788,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL); if (!lg_act) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size); @@ -3113,23 +3860,23 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, * @counter_id: VLAN counter ID returned as part of allocate resource * @l_id: large action resource ID */ -static enum ice_status +static int ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, u16 counter_id, u16 l_id) { struct ice_aqc_sw_rules_elem *lg_act; struct ice_aqc_sw_rules_elem *rx_tx; - enum ice_status status; /* 2 actions will be added while adding a large action counter */ const int num_acts = 2; u16 lg_act_size; u16 rules_size; u16 f_rule_id; u32 act; + int status; u16 id; if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) - return ICE_ERR_PARAM; + return -EINVAL; /* Create two back-to-back switch rules and submit them to the HW using * one memory buffer: @@ -3140,7 +3887,7 @@ ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL); if (!lg_act) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size); @@ -3187,7 +3934,7 @@ ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, ice_aqc_opc_update_sw_rules, NULL); if (!status) { m_ent->lg_act_idx = l_id; - m_ent->counter_index = counter_id; + m_ent->counter_index = (u8)counter_id; } devm_kfree(ice_hw_to_dev(hw), lg_act); @@ -3238,19 +3985,19 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, * Call AQ command to add a new switch rule or update existing switch rule * using the given VSI list ID */ -static enum ice_status +static int ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, enum ice_sw_lkup_type lkup_type) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; u16 
s_rule_size; u16 rule_type; + int status; int i; if (!num_vsi) - return ICE_ERR_PARAM; + return -EINVAL; if (lkup_type == ICE_SW_LKUP_MAC || lkup_type == ICE_SW_LKUP_MAC_VLAN || @@ -3258,6 +4005,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN || + lkup_type == ICE_SW_LKUP_DFLT || lkup_type == ICE_SW_LKUP_LAST) rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : ICE_AQC_SW_RULES_T_VSI_LIST_SET; @@ -3265,15 +4013,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; else - return ICE_ERR_PARAM; + return -EINVAL; s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_vsi; i++) { if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto exit; } /* AQ call requires hw_vsi_id(s) */ @@ -3300,11 +4048,11 @@ exit: * @vsi_list_id: stores the ID of the VSI list to be created * @lkup_type: switch rule filter's lookup type */ -static enum ice_status +static int ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) { - enum ice_status status; + int status; status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, ice_aqc_opc_alloc_res); @@ -3327,22 +4075,22 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, * to the corresponding filter management list to track this switch rule * and VSI mapping */ -static enum ice_status +static int ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list, struct ice_fltr_list_entry *f_entry) { struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; + int status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), GFP_KERNEL); if (!fm_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_create_pkt_fwd_rule_exit; } @@ -3387,16 +4135,16 @@ ice_create_pkt_fwd_rule_exit: * Call AQ command to update a previously created switch rule with a * VSI list ID */ -static enum ice_status +static int ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; + int status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); @@ -3416,13 +4164,15 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) * * Updates unicast switch filter rules based on VEB/VEPA mode */ -enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) +int ice_update_sw_rule_bridge_mode(struct ice_hw *hw) { - struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; - enum ice_status status = 0; struct list_head *rule_head; struct mutex *rule_lock; /* Lock to protect filter rule list */ + struct ice_switch_info *sw; + int status = 0; + + sw = hw->switch_info; rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 
rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; @@ -3472,24 +4222,24 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) * Add the new VSI to the previously created VSI list set * using the update switch rule command */ -static enum ice_status +static int ice_add_update_vsi_list(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_entry, struct ice_fltr_info *cur_fltr, struct ice_fltr_info *new_fltr) { - enum ice_status status = 0; u16 vsi_list_id = 0; + int status = 0; if ((cur_fltr->fltr_act == ICE_FWD_TO_Q || cur_fltr->fltr_act == ICE_FWD_TO_QGRP)) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if ((new_fltr->fltr_act == ICE_FWD_TO_Q || new_fltr->fltr_act == ICE_FWD_TO_QGRP) && (cur_fltr->fltr_act == ICE_FWD_TO_VSI || cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST)) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { /* Only one entry existed in the mapping and it was not already @@ -3501,7 +4251,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; vsi_handle_arr[0] = cur_fltr->vsi_handle; vsi_handle_arr[1] = new_fltr->vsi_handle; @@ -3529,7 +4279,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, vsi_list_id); if (!m_entry->vsi_list_info) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* If this entry was large action then the large action needs * to be updated to point to FWD to VSI list @@ -3544,7 +4294,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, enum ice_adminq_opc opcode; if (!m_entry->vsi_list_info) - return ICE_ERR_CFG; + return -EIO; /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) @@ -3603,7 +4353,7 @@ ice_find_rule_entry(struct list_head *list_head, * handle element. This can be extended further to search VSI list with more * than 1 vsi_count. Returns pointer to VSI list entry if found. */ -static struct ice_vsi_list_map_info * +struct ice_vsi_list_map_info * ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle, u16 *vsi_list_id) { @@ -3649,17 +4399,17 @@ ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle, * * Adds or updates the rule lists for a given recipe */ -static enum ice_status +static int ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, u8 lport, struct ice_fltr_list_entry *f_entry) { struct ice_fltr_info *new_fltr, *cur_fltr; struct ice_fltr_mgmt_list_entry *m_entry; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; + int status = 0; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; /* Load the hw_vsi_id only if the fwd action is fwd to VSI */ if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI) @@ -3699,7 +4449,7 @@ exit_add_rule_internal: * The VSI list should be emptied before this function is called to remove the * VSI list. 
*/ -static enum ice_status +static int ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, enum ice_sw_lkup_type lkup_type) { @@ -3717,21 +4467,21 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, * @fm_list: filter management entry for which the VSI list management needs to * be done */ -static enum ice_status +static int ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, struct ice_fltr_mgmt_list_entry *fm_list) { enum ice_sw_lkup_type lkup_type; - enum ice_status status = 0; u16 vsi_list_id; + int status = 0; if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST || fm_list->vsi_count == 0) - return ICE_ERR_PARAM; + return -EINVAL; /* A rule with the VSI being removed does not exist */ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; lkup_type = fm_list->fltr_info.lkup_type; vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id; @@ -3753,7 +4503,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, ICE_MAX_VSI); if (!ice_is_vsi_valid(hw, rem_vsi_handle)) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* Make sure VSI list is empty before removing it below */ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, @@ -3805,18 +4555,18 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, * @recp_list: recipe list for which the rule needs to removed * @f_entry: rule entry containing filter information */ -static enum ice_status +static int ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, struct ice_fltr_list_entry *f_entry) { struct ice_fltr_mgmt_list_entry *list_elem; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; bool remove_rule = false; + int status = 0; u16 vsi_handle; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); @@ -3825,14 +4575,14 @@ ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, list_elem = ice_find_rule_entry(&recp_list->filt_rules, &f_entry->fltr_info); if (!list_elem) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit; } if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) { remove_rule = true; } else if (!list_elem->vsi_list_info) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit; } else if (list_elem->vsi_list_info->ref_cnt > 1) { /* a ref_cnt > 1 indicates that the vsi_list is being @@ -3865,7 +4615,7 @@ ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); if (!s_rule) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto exit; } @@ -3902,20 +4652,20 @@ exit: * information for all resource types. Each resource type is an * ice_aqc_get_res_resp_elem structure. 
*/ -enum ice_status +int ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, struct ice_aqc_get_res_resp_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_get_res_alloc *resp; - enum ice_status status; struct ice_aq_desc desc; + int status; if (!buf) - return ICE_ERR_BAD_PTR; + return -EINVAL; if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; resp = &desc.params.get_res; @@ -3939,22 +4689,22 @@ ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, * @desc_id: input - first desc ID to start; output - next desc ID * @cd: pointer to command details structure or NULL */ -enum ice_status +int ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries, struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id, struct ice_sq_cd *cd) { struct ice_aqc_get_allocd_res_desc *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.get_res_desc; if (!buf) - return ICE_ERR_PARAM; + return -EINVAL; if (buf_size != (num_entries * sizeof(*buf))) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc); @@ -4102,7 +4852,7 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle) * check for duplicates in this case, removing duplicates from a given * list should be taken care of in the caller of this function. */ -static enum ice_status +static int ice_add_mac_rule(struct ice_hw *hw, struct list_head *m_list, struct ice_switch_info *sw, u8 lport) { @@ -4112,8 +4862,8 @@ ice_add_mac_rule(struct ice_hw *hw, struct list_head *m_list, struct list_head *rule_head; u16 total_elem_left, s_rule_size; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; u16 num_unicast = 0; + int status = 0; u8 elem_sent; s_rule = NULL; @@ -4128,23 +4878,24 @@ ice_add_mac_rule(struct ice_hw *hw, struct list_head *m_list, m_list_itr->fltr_info.flag = ICE_FLTR_TX; vsi_handle = m_list_itr->fltr_info.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); - m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; + if (m_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI) + m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; /* update the src in case it is VSI num */ if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) - return ICE_ERR_PARAM; + return -EINVAL; m_list_itr->fltr_info.src = hw_vsi_id; if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC || is_zero_ether_addr(add)) - return ICE_ERR_PARAM; + return -EINVAL; if (is_unicast_ether_addr(add) && !hw->umac_shared) { /* Don't overwrite the unicast address */ mutex_lock(rule_lock); if (ice_find_rule_entry(rule_head, &m_list_itr->fltr_info)) { mutex_unlock(rule_lock); - return ICE_ERR_ALREADY_EXISTS; + continue; } mutex_unlock(rule_lock); num_unicast++; @@ -4170,7 +4921,7 @@ ice_add_mac_rule(struct ice_hw *hw, struct list_head *m_list, s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, GFP_KERNEL); if (!s_rule) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_add_mac_exit; } @@ -4220,7 +4971,7 @@ ice_add_mac_rule(struct ice_hw *hw, struct list_head *m_list, fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), GFP_KERNEL); if (!fm_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_add_mac_exit; } fm_entry->fltr_info = *f_info; @@ -4242,7 +4993,6 @@ ice_add_mac_exit: return status; } - /** * ice_add_mac - Add a MAC address 
based filter rule * @hw: pointer to the hardware structure @@ -4250,10 +5000,10 @@ ice_add_mac_exit: * * Function add MAC rule for logical port from HW struct */ -enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) +int ice_add_mac(struct ice_hw *hw, struct list_head *m_list) { if (!m_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; return ice_add_mac_rule(hw, m_list, hw->switch_info, hw->port_info->lport); @@ -4265,7 +5015,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) * @recp_list: recipe list for which rule has to be added * @f_entry: filter entry containing one VLAN information */ -static enum ice_status +static int ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, struct ice_fltr_list_entry *f_entry) { @@ -4274,10 +5024,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, enum ice_sw_lkup_type lkup_type; u16 vsi_list_id = 0, vsi_handle; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; + int status = 0; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); @@ -4285,10 +5035,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, /* VLAN ID should only be 12 bits */ if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) - return ICE_ERR_PARAM; + return -EINVAL; if (new_fltr->src_id != ICE_SRC_ID_VSI) - return ICE_ERR_PARAM; + return -EINVAL; new_fltr->src = new_fltr->fwd_id.hw_vsi_id; lkup_type = new_fltr->lkup_type; @@ -4327,7 +5077,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr); if (!v_list_itr) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit; } /* reuse VSI list for new rule and increment ref_cnt */ @@ -4363,7 +5113,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, if (v_list_itr->vsi_count > 1 && v_list_itr->vsi_list_info->ref_cnt > 1) { ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); - status = ICE_ERR_CFG; + status = -EIO; goto exit; } @@ -4373,7 +5123,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, /* A rule already exists with the new VSI being added */ if (cur_handle == vsi_handle) { - status = ICE_ERR_ALREADY_EXISTS; + status = -EEXIST; goto exit; } @@ -4419,7 +5169,7 @@ exit: * @v_list: list of VLAN entries and forwarding information * @sw: pointer to switch info struct for which function add rule */ -static enum ice_status +static int ice_add_vlan_rule(struct ice_hw *hw, struct list_head *v_list, struct ice_switch_info *sw) { @@ -4429,7 +5179,7 @@ ice_add_vlan_rule(struct ice_hw *hw, struct list_head *v_list, recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN]; list_for_each_entry(v_list_itr, v_list, list_entry) { if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) - return ICE_ERR_PARAM; + return -EINVAL; v_list_itr->fltr_info.flag = ICE_FLTR_TX; v_list_itr->status = ice_add_vlan_internal(hw, recp_list, v_list_itr); @@ -4439,7 +5189,6 @@ ice_add_vlan_rule(struct ice_hw *hw, struct list_head *v_list, return 0; } - /** * ice_add_vlan - Add a VLAN based filter rule * @hw: pointer to the hardware structure @@ -4447,10 +5196,10 @@ ice_add_vlan_rule(struct ice_hw *hw, struct list_head *v_list, * * Function add VLAN 
rule for logical port from HW struct */ -enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) +int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) { if (!v_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; return ice_add_vlan_rule(hw, v_list, hw->switch_info); } @@ -4467,7 +5216,7 @@ enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) * sure to add a VLAN only filter on the same VSI. Packets belonging to that * VLAN won't be received on that VSI otherwise. */ -static enum ice_status +static int ice_add_mac_vlan_rule(struct ice_hw *hw, struct list_head *mv_list, struct ice_switch_info *sw, u8 lport) { @@ -4475,7 +5224,7 @@ ice_add_mac_vlan_rule(struct ice_hw *hw, struct list_head *mv_list, struct ice_sw_recipe *recp_list; if (!mv_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN]; list_for_each_entry(mv_list_itr, mv_list, list_entry) { @@ -4483,7 +5232,7 @@ ice_add_mac_vlan_rule(struct ice_hw *hw, struct list_head *mv_list, mv_list_itr->fltr_info.lkup_type; if (l_type != ICE_SW_LKUP_MAC_VLAN) - return ICE_ERR_PARAM; + return -EINVAL; mv_list_itr->fltr_info.flag = ICE_FLTR_TX; mv_list_itr->status = ice_add_rule_internal(hw, recp_list, lport, @@ -4494,7 +5243,6 @@ ice_add_mac_vlan_rule(struct ice_hw *hw, struct list_head *mv_list, return 0; } - /** * ice_add_mac_vlan - Add a MAC VLAN address based filter rule * @hw: pointer to the hardware structure @@ -4502,11 +5250,11 @@ ice_add_mac_vlan_rule(struct ice_hw *hw, struct list_head *mv_list, * * Function add MAC VLAN rule for logical port from HW struct */ -enum ice_status +int ice_add_mac_vlan(struct ice_hw *hw, struct list_head *mv_list) { if (!mv_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info, hw->port_info->lport); @@ -4523,7 +5271,7 @@ ice_add_mac_vlan(struct ice_hw *hw, struct list_head *mv_list) * the filter list with the necessary fields (including flags to * indicate Tx or Rx rules). 
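For context on the ice_add_vlan() path above, here is a hypothetical caller-side sketch showing how a one-entry VLAN filter list is assembled. It is not part of the patch, it assumes the driver's internal headers, and it only touches fields that this hunk actually references; real callers typically go through the driver's filter helpers and may differ.

/* Hypothetical sketch; assumes the driver's internal headers. */
static int model_add_one_vlan(struct ice_hw *hw, u16 vsi_handle, u16 vid)
{
        struct ice_fltr_list_entry entry;
        struct list_head v_list;

        memset(&entry, 0, sizeof(entry));
        INIT_LIST_HEAD(&v_list);

        entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
        entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
        entry.fltr_info.src_id = ICE_SRC_ID_VSI;
        entry.fltr_info.vsi_handle = vsi_handle;
        entry.fltr_info.l_data.vlan.vlan_id = vid;  /* must fit in 12 bits */
        list_add(&entry.list_entry, &v_list);

        /* ice_add_vlan() validates each entry (-EINVAL on a bad lookup type,
         * out-of-range VLAN ID or non-VSI source) and records a per-entry
         * status as well.
         */
        return ice_add_vlan(hw, &v_list);
}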
*/ -static enum ice_status +static int ice_add_eth_mac_rule(struct ice_hw *hw, struct list_head *em_list, struct ice_switch_info *sw, u8 lport) { @@ -4538,7 +5286,7 @@ ice_add_eth_mac_rule(struct ice_hw *hw, struct list_head *em_list, if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && l_type != ICE_SW_LKUP_ETHERTYPE) - return ICE_ERR_PARAM; + return -EINVAL; em_list_itr->status = ice_add_rule_internal(hw, recp_list, lport, @@ -4549,7 +5297,6 @@ ice_add_eth_mac_rule(struct ice_hw *hw, struct list_head *em_list, return 0; } - /** * ice_add_eth_mac - Add a ethertype based filter rule * @hw: pointer to the hardware structure @@ -4557,11 +5304,11 @@ ice_add_eth_mac_rule(struct ice_hw *hw, struct list_head *em_list, * * Function add ethertype rule for logical port from HW struct */ -enum ice_status +int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) { if (!em_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; return ice_add_eth_mac_rule(hw, em_list, hw->switch_info, hw->port_info->lport); @@ -4573,7 +5320,7 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) * @em_list: list of ethertype or ethertype MAC entries * @sw: pointer to switch info struct for which function add rule */ -static enum ice_status +static int ice_remove_eth_mac_rule(struct ice_hw *hw, struct list_head *em_list, struct ice_switch_info *sw) { @@ -4587,7 +5334,7 @@ ice_remove_eth_mac_rule(struct ice_hw *hw, struct list_head *em_list, if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && l_type != ICE_SW_LKUP_ETHERTYPE) - return ICE_ERR_PARAM; + return -EINVAL; recp_list = &sw->recp_list[l_type]; em_list_itr->status = ice_remove_rule_internal(hw, recp_list, @@ -4598,22 +5345,97 @@ ice_remove_eth_mac_rule(struct ice_hw *hw, struct list_head *em_list, return 0; } - /** * ice_remove_eth_mac - remove a ethertype based filter rule * @hw: pointer to the hardware structure * @em_list: list of ethertype and forwarding information * */ -enum ice_status +int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) { if (!em_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info); } +/** + * ice_get_lg_act_aqc_res_type - get resource type for a large action + * @res_type: resource type to be filled in case of function success + * @num_acts: number of actions to hold with a large action entry + * + * Get resource type for a large action depending on the number + * of single actions that it contains. + */ +static int +ice_get_lg_act_aqc_res_type(u16 *res_type, int num_acts) +{ + if (!res_type) + return -EINVAL; + + /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1. + * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3. + * If num_acts is greater than 2, then use + * ICE_AQC_RES_TYPE_WIDE_TABLE_4. + * The num_acts cannot be equal to 0 or greater than 4. 
+ */ + switch (num_acts) { + case 1: + *res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_1; + break; + case 2: + *res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_2; + break; + case 3: + case 4: + *res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_4; + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * ice_alloc_res_lg_act - add large action resource + * @hw: pointer to the hardware structure + * @l_id: large action ID to fill it in + * @num_acts: number of actions to hold with a large action entry + */ +static int +ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts) +{ + struct ice_aqc_alloc_free_res_elem *sw_buf; + u16 buf_len, res_type; + int status; + + if (!l_id) + return -EINVAL; + + status = ice_get_lg_act_aqc_res_type(&res_type, num_acts); + if (status) + return status; + + /* Allocate resource for large action */ + buf_len = struct_size(sw_buf, elem, 1); + sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); + if (!sw_buf) + return -ENOMEM; + + sw_buf->res_type = cpu_to_le16(res_type); + sw_buf->num_elems = cpu_to_le16(1); + + status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, + ice_aqc_opc_alloc_res, NULL); + if (!status) + *l_id = le16_to_cpu(sw_buf->elem[0].e.sw_resp); + + devm_kfree(ice_hw_to_dev(hw), sw_buf); + + return status; +} /** * ice_rem_sw_rule_info @@ -4688,83 +5510,87 @@ void ice_rem_all_sw_rules_info(struct ice_hw *hw) * add filter rule to set/unset given VSI as default VSI for the switch * (represented by swid) */ -enum ice_status +int ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set, u8 direction) { - struct ice_aqc_sw_rules_elem *s_rule; + struct ice_fltr_list_entry f_list_entry; + struct ice_sw_recipe *recp_list; struct ice_fltr_info f_info; struct ice_hw *hw = pi->hw; - enum ice_adminq_opc opcode; - enum ice_status status; - u16 s_rule_size; + u8 lport = pi->lport; u16 hw_vsi_id; + int status; + + recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT]; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); - s_rule_size = set ? 
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : - ICE_SW_RULE_RX_TX_NO_HDR_SIZE; - - s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - memset(&f_info, 0, sizeof(f_info)); f_info.lkup_type = ICE_SW_LKUP_DFLT; f_info.flag = direction; f_info.fltr_act = ICE_FWD_TO_VSI; f_info.fwd_id.hw_vsi_id = hw_vsi_id; + f_info.vsi_handle = vsi_handle; if (f_info.flag & ICE_FLTR_RX) { f_info.src = pi->lport; f_info.src_id = ICE_SRC_ID_LPORT; - if (!set) - f_info.fltr_rule_id = - pi->dflt_rx_vsi_rule_id; } else if (f_info.flag & ICE_FLTR_TX) { f_info.src_id = ICE_SRC_ID_VSI; f_info.src = hw_vsi_id; - if (!set) - f_info.fltr_rule_id = - pi->dflt_tx_vsi_rule_id; } + f_list_entry.fltr_info = f_info; if (set) - opcode = ice_aqc_opc_add_sw_rules; + status = ice_add_rule_internal(hw, recp_list, lport, + &f_list_entry); else - opcode = ice_aqc_opc_remove_sw_rules; + status = ice_remove_rule_internal(hw, recp_list, + &f_list_entry); - ice_fill_sw_rule(hw, &f_info, s_rule, opcode); + return status; +} - status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL); - if (status || !(f_info.flag & ICE_FLTR_TX_RX)) - goto out; - if (set) { - u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); +/** + * ice_check_if_dflt_vsi - check if VSI is default VSI + * @pi: pointer to the port_info structure + * @vsi_handle: vsi handle to check for in filter list + * @rule_exists: indicates if there are any VSI's in the rule list + * + * checks if the VSI is in a default VSI list, and also indicates + * if the default VSI list is empty + */ +bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, + bool *rule_exists) +{ + struct ice_fltr_mgmt_list_entry *fm_entry; + struct list_head *rule_head; + struct ice_sw_recipe *recp_list; + struct mutex *rule_lock; + bool ret = false; + recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT]; + rule_lock = &recp_list->filt_rule_lock; + rule_head = &recp_list->filt_rules; - if (f_info.flag & ICE_FLTR_TX) { - pi->dflt_tx_vsi_num = hw_vsi_id; - pi->dflt_tx_vsi_rule_id = index; - } else if (f_info.flag & ICE_FLTR_RX) { - pi->dflt_rx_vsi_num = hw_vsi_id; - pi->dflt_rx_vsi_rule_id = index; - } - } else { - if (f_info.flag & ICE_FLTR_TX) { - pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; - pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT; - } else if (f_info.flag & ICE_FLTR_RX) { - pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; - pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT; + mutex_lock(rule_lock); + + if (rule_exists && !list_empty(rule_head)) + *rule_exists = true; + + list_for_each_entry(fm_entry, rule_head, list_entry) { + if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) { + ret = true; + break; } } -out: - devm_kfree(ice_hw_to_dev(hw), s_rule); - return status; + mutex_unlock(rule_lock); + return ret; } /** @@ -4804,12 +5630,12 @@ ice_find_ucast_rule_entry(struct list_head *list_head, * This function removes either a MAC filter rule or a specific VSI from a * VSI list for a multicast MAC address. * - * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by + * Returns -ENOENT if a given entry was not added by * ice_add_mac. Caller should be aware that this call will only work if all * the entries passed into m_list were added previously. It will not attempt to * do a partial remove of entries that were found. 
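With the rework above, ice_cfg_dflt_vsi() no longer builds a raw switch rule buffer and no longer caches dflt_tx/rx_vsi_num in the port_info; the default-VSI rule is tracked like any other ICE_SW_LKUP_DFLT filter, and the new ice_check_if_dflt_vsi() answers the question the cached fields used to. A hedged caller-side sketch follows; it assumes the driver's internal headers and the -EEXIST policy shown is the caller's choice, not something this patch mandates.

/* Hypothetical sketch; assumes the driver's internal headers. */
static int model_make_default_vsi(struct ice_port_info *pi, u16 vsi_handle)
{
        bool rule_exists = false;
        int err;

        /* Already the default VSI? Then there is nothing to do. */
        if (ice_check_if_dflt_vsi(pi, vsi_handle, &rule_exists))
                return 0;

        /* Installs an ICE_SW_LKUP_DFLT rule via ice_add_rule_internal() */
        err = ice_cfg_dflt_vsi(pi, vsi_handle, true, ICE_FLTR_RX);
        if (err == -EEXIST)
                err = 0;        /* caller policy: treat a duplicate as success */
        return err;
}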
*/ -static enum ice_status +static int ice_remove_mac_rule(struct ice_hw *hw, struct list_head *m_list, struct ice_sw_recipe *recp_list) { @@ -4817,7 +5643,7 @@ ice_remove_mac_rule(struct ice_hw *hw, struct list_head *m_list, struct mutex *rule_lock; /* Lock to protect filter rule list */ if (!m_list) - return ICE_ERR_PARAM; + return -EINVAL; rule_lock = &recp_list->filt_rule_lock; list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { @@ -4826,11 +5652,11 @@ ice_remove_mac_rule(struct ice_hw *hw, struct list_head *m_list, u16 vsi_handle; if (l_type != ICE_SW_LKUP_MAC) - return ICE_ERR_PARAM; + return -EINVAL; vsi_handle = list_itr->fltr_info.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; list_itr->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); @@ -4843,7 +5669,7 @@ ice_remove_mac_rule(struct ice_hw *hw, struct list_head *m_list, if (!ice_find_ucast_rule_entry(&recp_list->filt_rules, &list_itr->fltr_info)) { mutex_unlock(rule_lock); - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } mutex_unlock(rule_lock); } @@ -4861,7 +5687,7 @@ ice_remove_mac_rule(struct ice_hw *hw, struct list_head *m_list, * @m_list: list of MAC addresses and forwarding information * */ -enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) +int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_sw_recipe *recp_list; @@ -4869,14 +5695,13 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) return ice_remove_mac_rule(hw, m_list, recp_list); } - /** * ice_remove_vlan_rule - Remove VLAN based filter rule * @hw: pointer to the hardware structure * @v_list: list of VLAN entries and forwarding information * @recp_list: list from which function remove VLAN */ -static enum ice_status +static int ice_remove_vlan_rule(struct ice_hw *hw, struct list_head *v_list, struct ice_sw_recipe *recp_list) { @@ -4886,7 +5711,7 @@ ice_remove_vlan_rule(struct ice_hw *hw, struct list_head *v_list, enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; if (l_type != ICE_SW_LKUP_VLAN) - return ICE_ERR_PARAM; + return -EINVAL; v_list_itr->status = ice_remove_rule_internal(hw, recp_list, v_list_itr); if (v_list_itr->status) @@ -4895,20 +5720,19 @@ ice_remove_vlan_rule(struct ice_hw *hw, struct list_head *v_list, return 0; } - /** * ice_remove_vlan - remove a VLAN address based filter rule * @hw: pointer to the hardware structure * @v_list: list of VLAN and forwarding information * */ -enum ice_status +int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) { struct ice_sw_recipe *recp_list; if (!v_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN]; return ice_remove_vlan_rule(hw, v_list, recp_list); @@ -4920,7 +5744,7 @@ ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) * @v_list: list of MAC VLAN entries and forwarding information * @recp_list: list from which function remove MAC VLAN */ -static enum ice_status +static int ice_remove_mac_vlan_rule(struct ice_hw *hw, struct list_head *v_list, struct ice_sw_recipe *recp_list) { @@ -4931,7 +5755,7 @@ ice_remove_mac_vlan_rule(struct ice_hw *hw, struct list_head *v_list, enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; if (l_type != ICE_SW_LKUP_MAC_VLAN) - return ICE_ERR_PARAM; + return -EINVAL; v_list_itr->status = ice_remove_rule_internal(hw, recp_list, v_list_itr); @@ -4941,25 +5765,23 @@ ice_remove_mac_vlan_rule(struct ice_hw *hw, struct list_head 
*v_list, return 0; } - /** * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule * @hw: pointer to the hardware structure * @mv_list: list of MAC VLAN and forwarding information */ -enum ice_status +int ice_remove_mac_vlan(struct ice_hw *hw, struct list_head *mv_list) { struct ice_sw_recipe *recp_list; if (!mv_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN]; return ice_remove_mac_vlan_rule(hw, mv_list, recp_list); } - /** * ice_vsi_uses_fltr - Determine if given VSI uses specified filter * @fm_entry: filter entry to inspect @@ -4988,7 +5810,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) * fltr_info.fwd_id fields. These are set such that later logic can * extract which VSI to remove the fltr from, and pass on that information. */ -static enum ice_status +static int ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct list_head *vsi_list_head, struct ice_fltr_info *fi) @@ -5000,7 +5822,7 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, */ tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL); if (!tmp) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; tmp->fltr_info = *fi; @@ -5031,17 +5853,17 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, * Note that this means all entries in vsi_list_head must be explicitly * deallocated by the caller when done with list. */ -static enum ice_status +static int ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct list_head *lkup_list_head, struct list_head *vsi_list_head) { struct ice_fltr_mgmt_list_entry *fm_entry; - enum ice_status status = 0; + int status = 0; /* check to make sure VSI ID is valid and within boundary */ if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry(fm_entry, lkup_list_head, list_entry) { if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) @@ -5056,7 +5878,6 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, return status; } - /** * ice_determine_promisc_mask * @fi: filter info to parse @@ -5090,7 +5911,6 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi) return promisc_mask; } - /** * _ice_get_vsi_promisc - get promiscuous mode of given VSI * @hw: pointer to the hardware structure @@ -5098,22 +5918,25 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi) * @promisc_mask: pointer to mask to be filled in * @vid: VLAN ID of promisc VLAN VSI * @sw: pointer to switch info struct for which function add rule + * @lkup: switch rule filter lookup type */ -static enum ice_status +static int _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, - u16 *vid, struct ice_switch_info *sw) + u16 *vid, struct ice_switch_info *sw, + enum ice_sw_lkup_type lkup) { struct ice_fltr_mgmt_list_entry *itr; struct list_head *rule_head; struct mutex *rule_lock; /* Lock to protect filter rule list */ - if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + if (!ice_is_vsi_valid(hw, vsi_handle) || + (lkup != ICE_SW_LKUP_PROMISC && lkup != ICE_SW_LKUP_PROMISC_VLAN)) + return -EINVAL; *vid = 0; *promisc_mask = 0; - rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules; - rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock; + rule_head = &sw->recp_list[lkup].filt_rules; + rule_lock = &sw->recp_list[lkup].filt_rule_lock; mutex_lock(rule_lock); list_for_each_entry(itr, rule_head, list_entry) { @@ -5137,51 +5960,12 @@ _ice_get_vsi_promisc(struct 
ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, * @promisc_mask: pointer to mask to be filled in * @vid: VLAN ID of promisc VLAN VSI */ -enum ice_status +int ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, u16 *vid) { return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask, - vid, hw->switch_info); -} - -/** - * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI - * @hw: pointer to the hardware structure - * @vsi_handle: VSI handle to retrieve info from - * @promisc_mask: pointer to mask to be filled in - * @vid: VLAN ID of promisc VLAN VSI - * @sw: pointer to switch info struct for which function add rule - */ -static enum ice_status -_ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, - u16 *vid, struct ice_switch_info *sw) -{ - struct ice_fltr_mgmt_list_entry *itr; - struct list_head *rule_head; - struct mutex *rule_lock; /* Lock to protect filter rule list */ - - if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; - - *vid = 0; - *promisc_mask = 0; - rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules; - rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock; - - mutex_lock(rule_lock); - list_for_each_entry(itr, rule_head, list_entry) { - /* Continue if this filter doesn't apply to this VSI or the - * VSI ID is not in the VSI map for this filter - */ - if (!ice_vsi_uses_fltr(itr, vsi_handle)) - continue; - - *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info); - } - mutex_unlock(rule_lock); - - return 0; + vid, hw->switch_info, ICE_SW_LKUP_PROMISC); } /** @@ -5191,12 +5975,13 @@ _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, * @promisc_mask: pointer to mask to be filled in * @vid: VLAN ID of promisc VLAN VSI */ -enum ice_status +int ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, u16 *vid) { - return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask, - vid, hw->switch_info); + return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask, + vid, hw->switch_info, + ICE_SW_LKUP_PROMISC_VLAN); } /** @@ -5205,7 +5990,7 @@ ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, * @recp_id: recipe ID for which the rule needs to removed * @v_list: list of promisc entries */ -static enum ice_status +static int ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list) { @@ -5230,7 +6015,7 @@ ice_remove_promisc(struct ice_hw *hw, u8 recp_id, * @vid: VLAN ID to clear VLAN promiscuous * @sw: pointer to switch info struct for which function add rule */ -static enum ice_status +static int _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid, struct ice_switch_info *sw) { @@ -5239,11 +6024,11 @@ _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, struct ice_fltr_mgmt_list_entry *itr; struct list_head *rule_head; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; + int status = 0; u8 recipe_id; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) recipe_id = ICE_SW_LKUP_PROMISC_VLAN; @@ -5302,7 +6087,7 @@ free_fltr_list: * @promisc_mask: mask of promiscuous config bits to clear * @vid: VLAN ID to clear VLAN promiscuous */ -enum ice_status +int ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { @@ -5319,21 +6104,21 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, 
* @lport: logical port number to configure promisc mode * @sw: pointer to switch info struct for which function add rule */ -static enum ice_status +static int _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid, u8 lport, struct ice_switch_info *sw) { enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR }; struct ice_fltr_list_entry f_list_entry; struct ice_fltr_info new_fltr; - enum ice_status status = 0; bool is_tx_fltr; + int status = 0; u16 hw_vsi_id; int pkt_type; u8 recipe_id; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); memset(&new_fltr, 0, sizeof(new_fltr)); @@ -5435,7 +6220,7 @@ set_promisc_exit: * @promisc_mask: mask of promiscuous config bits * @vid: VLAN ID to set VLAN promiscuous */ -enum ice_status +int ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { @@ -5455,7 +6240,7 @@ ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, * * Configure VSI with all associated VLANs to given promiscuous mode(s) */ -static enum ice_status +static int _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc, u8 lport, struct ice_switch_info *sw) @@ -5464,7 +6249,7 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, struct list_head vsi_list_head; struct list_head *vlan_head; struct mutex *vlan_lock; /* Lock to protect filter rule list */ - enum ice_status status; + int status; u16 vlan_id; INIT_LIST_HEAD(&vsi_list_head); @@ -5478,6 +6263,13 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, goto free_fltr_list; list_for_each_entry(list_itr, &vsi_list_head, list_entry) { + /* Avoid enabling or disabling vlan zero twice when in double + * vlan mode + */ + if (ice_is_dvm_ena(hw) && + list_itr->fltr_info.l_data.vlan.tpid == 0) + continue; + vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id; if (rm_vlan_promisc) status = _ice_clear_vsi_promisc(hw, vsi_handle, @@ -5487,7 +6279,7 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, status = _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vlan_id, lport, sw); - if (status) + if (status && status != -EEXIST) break; } @@ -5508,7 +6300,7 @@ free_fltr_list: * * Configure VSI with all associated VLANs to given promiscuous mode(s) */ -enum ice_status +int ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc) { @@ -5534,7 +6326,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, struct list_head *rule_head; struct ice_fltr_list_entry *tmp; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status; + int status; INIT_LIST_HEAD(&remove_list_head); rule_lock = &recp_list[lkup].filt_rule_lock; @@ -5555,7 +6347,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, break; case ICE_SW_LKUP_PROMISC: case ICE_SW_LKUP_PROMISC_VLAN: - ice_remove_promisc(hw, lkup, &remove_list_head); + ice_remove_promisc(hw, (u8)lkup, &remove_list_head); break; case ICE_SW_LKUP_MAC_VLAN: ice_remove_mac_vlan(hw, &remove_list_head); @@ -5607,7 +6399,6 @@ ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle, sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN); } - /** * ice_remove_vsi_fltr - Remove all filters for a VSI * @hw: pointer to the hardware structure @@ -5626,19 +6417,19 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle) * @num_items: number of entries requested for FD resource type * 
@counter_id: counter index returned by AQ call */ -enum ice_status +int ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 *counter_id) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; /* Allocate resource */ buf_len = struct_size(buf, elem, 1); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->num_elems = cpu_to_le16(num_items); buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & @@ -5664,19 +6455,19 @@ exit: * @num_items: number of entries to be freed for FD resource type * @counter_id: counter ID resource which needs to be freed */ -enum ice_status +int ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; /* Free resource */ buf_len = struct_size(buf, elem, 1); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->num_elems = cpu_to_le16(num_items); buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & @@ -5697,7 +6488,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, * @hw: pointer to the hardware structure * @counter_id: returns counter index */ -enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id) +int ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER, ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, @@ -5709,67 +6500,20 @@ enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id) * @hw: pointer to the hardware structure * @counter_id: counter index to be freed */ -enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id) +int ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id) { return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER, ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, counter_id); } -/** - * ice_alloc_res_lg_act - add large action resource - * @hw: pointer to the hardware structure - * @l_id: large action ID to fill it in - * @num_acts: number of actions to hold with a large action entry - */ -static enum ice_status -ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts) -{ - struct ice_aqc_alloc_free_res_elem *sw_buf; - enum ice_status status; - u16 buf_len; - - if (num_acts > ICE_MAX_LG_ACT || num_acts == 0) - return ICE_ERR_PARAM; - - /* Allocate resource for large action */ - buf_len = struct_size(sw_buf, elem, 1); - sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); - if (!sw_buf) - return ICE_ERR_NO_MEMORY; - - sw_buf->num_elems = cpu_to_le16(1); - - /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1. - * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3. - * If num_acts is greater than 2, then use - * ICE_AQC_RES_TYPE_WIDE_TABLE_4. - * The num_acts cannot exceed 4. This was ensured at the - * beginning of the function. 
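Both the counter allocation above and the relocated ice_alloc_res_lg_act() size their admin-queue buffer with struct_size(buf, elem, 1), i.e. the fixed header plus one flexible-array element. The stand-alone illustration below mirrors that sizing pattern; STRUCT_SIZE and demo_alloc_free_res_elem are simplified stand-ins (the kernel's struct_size() from linux/overflow.h also does overflow checking, and the real AQ element layout differs).

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's struct_size(); no overflow checks. */
#define STRUCT_SIZE(ptr, member, n) \
        (sizeof(*(ptr)) + (n) * sizeof((ptr)->member[0]))

struct demo_alloc_free_res_elem {
        uint16_t res_type;
        uint16_t num_elems;
        struct {
                uint16_t sw_resp;       /* models elem[i].e.sw_resp */
        } elem[];                       /* flexible array member */
};

int main(void)
{
        struct demo_alloc_free_res_elem *buf;
        size_t buf_len = STRUCT_SIZE(buf, elem, 1);     /* sizeof only, not evaluated */

        buf = calloc(1, buf_len);
        if (!buf)
                return 1;

        buf->num_elems = 1;
        printf("header %zu + 1 elem %zu = buf_len %zu\n",
               sizeof(*buf), sizeof(buf->elem[0]), buf_len);
        free(buf);
        return 0;
}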
- */ - if (num_acts == 1) - sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_WIDE_TABLE_1); - else if (num_acts == 2) - sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_WIDE_TABLE_2); - else - sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_WIDE_TABLE_4); - - status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, - ice_aqc_opc_alloc_res, NULL); - if (!status) - *l_id = le16_to_cpu(sw_buf->elem[0].e.sw_resp); - - devm_kfree(ice_hw_to_dev(hw), sw_buf); - return status; -} - /** * ice_add_mac_with_sw_marker - add filter with sw marker * @hw: pointer to the hardware structure * @f_info: filter info structure containing the MAC filter information * @sw_marker: sw marker to tag the Rx descriptor with */ -enum ice_status +int ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info, u16 sw_marker) { @@ -5778,21 +6522,21 @@ ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info, struct ice_sw_recipe *recp_list; struct list_head l_head; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status ret; bool entry_exists; u16 lg_act_id; + int ret; if (f_info->fltr_act != ICE_FWD_TO_VSI) - return ICE_ERR_PARAM; + return -EINVAL; if (f_info->lkup_type != ICE_SW_LKUP_MAC) - return ICE_ERR_PARAM; + return -EINVAL; if (sw_marker == ICE_INVAL_SW_MARKER_ID) - return ICE_ERR_PARAM; + return -EINVAL; if (!ice_is_vsi_valid(hw, f_info->vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle); /* Add filter if it doesn't exist so then the adding of large @@ -5806,7 +6550,7 @@ ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info, entry_exists = false; ret = ice_add_mac_rule(hw, &l_head, hw->switch_info, hw->port_info->lport); - if (ret == ICE_ERR_ALREADY_EXISTS) + if (ret == -EEXIST) entry_exists = true; else if (ret) return ret; @@ -5823,13 +6567,13 @@ ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info, * sw marker large action */ if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) { - ret = ICE_ERR_PARAM; + ret = -EINVAL; goto exit_error; } /* if same marker was added before */ if (m_entry->sw_marker_id == sw_marker) { - ret = ICE_ERR_ALREADY_EXISTS; + ret = -EEXIST; goto exit_error; } @@ -5865,7 +6609,7 @@ exit_error: * @f_info: pointer to filter info structure containing the MAC filter * information */ -enum ice_status +int ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info) { struct ice_fltr_mgmt_list_entry *m_entry; @@ -5873,19 +6617,19 @@ ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info) struct ice_sw_recipe *recp_list; struct list_head l_head; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status ret; bool entry_exist; u16 counter_id; u16 lg_act_id; + int ret; if (f_info->fltr_act != ICE_FWD_TO_VSI) - return ICE_ERR_PARAM; + return -EINVAL; if (f_info->lkup_type != ICE_SW_LKUP_MAC) - return ICE_ERR_PARAM; + return -EINVAL; if (!ice_is_vsi_valid(hw, f_info->vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle); recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC]; @@ -5903,7 +6647,7 @@ ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info) ret = ice_add_mac_rule(hw, &l_head, hw->switch_info, hw->port_info->lport); - if (ret == ICE_ERR_ALREADY_EXISTS) + if (ret == -EEXIST) entry_exist = true; else if (ret) return ret; @@ -5911,19 +6655,19 @@ ice_add_mac_with_counter(struct 
ice_hw *hw, struct ice_fltr_info *f_info) mutex_lock(rule_lock); m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info); if (!m_entry) { - ret = ICE_ERR_BAD_PTR; + ret = -EINVAL; goto exit_error; } /* Don't enable counter for a filter for which sw marker was enabled */ if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) { - ret = ICE_ERR_PARAM; + ret = -EINVAL; goto exit_error; } /* If a counter was already enabled then don't need to add again */ if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) { - ret = ICE_ERR_ALREADY_EXISTS; + ret = -EEXIST; goto exit_error; } @@ -5971,7 +6715,8 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = { { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } }, { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } }, { ICE_ETYPE_OL, { 0 } }, - { ICE_VLAN_OFOS, { 0, 2 } }, + { ICE_ETYPE_IL, { 0 } }, + { ICE_VLAN_OFOS, { 2, 0 } }, { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, @@ -5987,6 +6732,10 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = { { ICE_VXLAN_GPE, { 8, 10, 12, 14 } }, { ICE_NVGRE, { 0, 2, 4, 6 } }, { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } }, + { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } }, + { ICE_PPPOE, { 0, 2, 4, 6 } }, + { ICE_VLAN_EX, { 2, 0 } }, + { ICE_VLAN_IN, { 2, 0 } }, }; /* The following table describes preferred grouping of recipes. @@ -5999,6 +6748,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { { ICE_MAC_OFOS, ICE_MAC_OFOS_HW }, { ICE_MAC_IL, ICE_MAC_IL_HW }, { ICE_ETYPE_OL, ICE_ETYPE_OL_HW }, + { ICE_ETYPE_IL, ICE_ETYPE_IL_HW }, { ICE_VLAN_OFOS, ICE_VLAN_OL_HW }, { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW }, { ICE_IPV4_IL, ICE_IPV4_IL_HW }, @@ -6013,16 +6763,29 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { { ICE_VXLAN_GPE, ICE_UDP_OF_HW }, { ICE_NVGRE, ICE_GRE_OF_HW }, { ICE_GTP, ICE_UDP_OF_HW }, + { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW }, + { ICE_PPPOE, ICE_PPPOE_HW }, + { ICE_PFCP, ICE_UDP_ILOS_HW }, + { ICE_L2TPV3, ICE_L2TPV3_HW }, + { ICE_ESP, ICE_ESP_HW }, + { ICE_AH, ICE_AH_HW }, + { ICE_NAT_T, ICE_UDP_ILOS_HW }, + { ICE_VLAN_EX, ICE_VLAN_OF_HW }, + { ICE_VLAN_IN, ICE_VLAN_OL_HW }, + { ICE_FLG_DIR, ICE_META_DATA_ID_HW}, }; /** * ice_find_recp - find a recipe * @hw: pointer to the hardware structure * @lkup_exts: extension sequence to match + * @tun_type: tunnel type of switch filter + * @priority: priority of switch filter * * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. */ -static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts) +static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, + enum ice_sw_tunnel_type tun_type, u32 priority) { bool refresh_required = true; struct ice_sw_recipe *recp; @@ -6084,7 +6847,8 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts) /* If for "i"th recipe the found was never set to false * then it means we found our match */ - if (found) + if (tun_type == recp[i].tun_type && found && + priority == recp[i].priority) return i; /* Return the recipe ID */ } } @@ -6166,8 +6930,6 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule, return ret_val; } - - /** * ice_create_first_fit_recp_def - Create a recipe grouping * @hw: pointer to the hardware structure @@ -6179,7 +6941,7 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule, * and start grouping them in 4-word groups. 
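ice_find_recp() above now refuses to reuse an existing recipe unless the tunnel type and the priority match in addition to the extracted words, so two rules with identical field extractions but different tunnel contexts end up with separate recipes. Below is a toy, stand-alone model of that stricter match; the structures are simplified stand-ins, not the driver's types.

#include <stdbool.h>
#include <stdio.h>

#define MODEL_MAX_RECIPES 4
#define MODEL_NOT_FOUND   MODEL_MAX_RECIPES

struct model_recipe {
        bool in_use;
        unsigned int word_sig;  /* stands in for the protocol/offset word set */
        int tun_type;           /* stands in for enum ice_sw_tunnel_type */
        unsigned int priority;
};

static unsigned int
model_find_recp(const struct model_recipe *recps, unsigned int word_sig,
                int tun_type, unsigned int priority)
{
        unsigned int i;

        for (i = 0; i < MODEL_MAX_RECIPES; i++) {
                if (!recps[i].in_use)
                        continue;
                /* all three must match, mirroring the new ice_find_recp() test */
                if (recps[i].word_sig == word_sig &&
                    recps[i].tun_type == tun_type &&
                    recps[i].priority == priority)
                        return i;
        }
        return MODEL_NOT_FOUND;
}

int main(void)
{
        struct model_recipe recps[MODEL_MAX_RECIPES] = {
                { .in_use = true, .word_sig = 0xabc, .tun_type = 1, .priority = 3 },
        };

        printf("same words, same tunnel:  %u\n",
               model_find_recp(recps, 0xabc, 1, 3));
        printf("same words, other tunnel: %u (== not found)\n",
               model_find_recp(recps, 0xabc, 2, 3));
        return 0;
}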
Each group makes up one * recipe. */ -static enum ice_status +static int ice_create_first_fit_recp_def(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, struct list_head *rg_list, @@ -6188,9 +6950,21 @@ ice_create_first_fit_recp_def(struct ice_hw *hw, struct ice_pref_recipe_group *grp = NULL; u8 j; - *recp_cnt = 0; + if (!lkup_exts->n_val_words) { + struct ice_recp_grp_entry *entry; + + entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*entry), + GFP_KERNEL); + if (!entry) + return -ENOMEM; + list_add(&entry->l_entry, rg_list); + grp = &entry->r_group; + (*recp_cnt)++; + grp->n_val_pairs = 0; + } + /* Walk through every word in the rule to check if it is not done. If so * then this word needs to be part of a new recipe. */ @@ -6204,7 +6978,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw, sizeof(*entry), GFP_KERNEL); if (!entry) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; list_add(&entry->l_entry, rg_list); grp = &entry->r_group; (*recp_cnt)++; @@ -6230,7 +7004,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw, * Helper function to fill in the field vector indices for protocol-offset * pairs. These indexes are then ultimately programmed into a recipe. */ -static enum ice_status +static int ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, struct list_head *rg_list) { @@ -6272,7 +7046,7 @@ ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, * invalid pair */ if (!found) - return ICE_ERR_PARAM; + return -EINVAL; } } @@ -6348,26 +7122,36 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, return (u16) bitmap_weight(free_idx, ICE_MAX_FV_WORDS); } +static void ice_set_recipe_index(unsigned long idx, u8 *bitmap) +{ + u32 byte = idx / BITS_PER_BYTE; + u32 bit = idx % BITS_PER_BYTE; + + if (byte >= 8) + return; + + bitmap[byte] |= 1 << bit; +} + /** * ice_add_sw_recipe - function to call AQ calls to create switch recipe * @hw: pointer to hardware structure * @rm: recipe management list entry - * @match_tun_mask: tunnel mask that needs to be programmed * @profiles: bitmap of profiles that will be associated. */ -static enum ice_status +static int ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, - u16 match_tun_mask, unsigned long *profiles) + unsigned long *profiles) { DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); struct ice_aqc_recipe_data_elem *tmp; struct ice_aqc_recipe_data_elem *buf; struct ice_recp_grp_entry *entry; - enum ice_status status; u16 free_res_idx; u16 recipe_count; u8 chain_idx; u8 recps = 0; + int status; /* When more than one recipe are required, another recipe is needed to * chain them together. Matching a tunnel metadata ID takes up one of @@ -6383,23 +7167,23 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, if (rm->n_grp_count > 1) { if (rm->n_grp_count > free_res_idx) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; rm->n_grp_count++; } if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; tmp = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); if (!tmp) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), GFP_KERNEL); if (!buf) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_mem; } @@ -6413,7 +7197,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, /* Allocate the recipe resources, and configure them according to the * match fields from protocol headers and extracted field vectors. 
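ice_create_first_fit_recp_def() now also emits a single empty group when there are no valid lookup words, and as before it packs the valid words into 4-word groups, each of which becomes one recipe; whenever more than one group results, ice_add_sw_recipe() has to add a chaining (root) recipe and needs a free result index per group. A stand-alone model of that group-count arithmetic follows; the 4-words-per-group figure is taken from the comment above and is an assumption here, the real limit is a driver constant.

#include <stdio.h>

#define MODEL_WORDS_PER_RECIPE 4        /* assumed, per "4-word groups" above */

/* Number of recipe groups a first-fit packing of n_val_words produces. */
static unsigned int model_recp_groups(unsigned int n_val_words)
{
        if (!n_val_words)
                return 1;       /* the updated code still creates one empty group */
        return (n_val_words + MODEL_WORDS_PER_RECIPE - 1) / MODEL_WORDS_PER_RECIPE;
}

int main(void)
{
        unsigned int n;

        for (n = 0; n <= 9; n++) {
                unsigned int grps = model_recp_groups(n);

                /* more than one group means a chaining root recipe on top */
                printf("%u words -> %u group(s)%s\n", n, grps,
                       grps > 1 ? " + 1 chaining recipe" : "");
        }
        return 0;
}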
*/ - chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); + chain_idx = (u8) find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); list_for_each_entry(entry, &rm->rg_list, l_entry) { u8 i; @@ -6448,7 +7232,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, } for (i = 0; i < entry->r_group.n_val_pairs; i++) { - buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i]; + buf[recps].content.lkup_indx[i + 1] = + (u8)entry->fv_idx[i]; buf[recps].content.mask[i + 1] = cpu_to_le16(entry->fv_mask[i]); } @@ -6459,7 +7244,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, */ if (chain_idx >= ICE_MAX_FV_WORDS) { ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); - status = ICE_ERR_MAX_LIMIT; + status = -ENOSPC; goto err_unroll; } @@ -6469,15 +7254,15 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & ICE_AQ_RECIPE_RESULT_DATA_M); clear_bit(chain_idx, result_idx_bm); - chain_idx = find_first_bit(result_idx_bm, - ICE_MAX_FV_WORDS); + chain_idx = (u8) find_first_bit(result_idx_bm, + ICE_MAX_FV_WORDS); } /* fill recipe dependencies */ - bitmap_zero((unsigned long *)buf[recps].recipe_bitmap, - ICE_MAX_NUM_RECIPES); - set_bit(buf[recps].recipe_indx, - (unsigned long *)buf[recps].recipe_bitmap); + memset(buf[recps].recipe_bitmap, 0, + sizeof(buf[recps].recipe_bitmap)); + ice_set_recipe_index(buf[recps].recipe_indx, + buf[recps].recipe_bitmap); buf[recps].content.act_ctrl_fwd_priority = rm->priority; recps++; } @@ -6490,7 +7275,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, memcpy(buf[0].recipe_bitmap, rm->r_bitmap, sizeof(buf[0].recipe_bitmap)); } else { - status = ICE_ERR_BAD_PTR; + status = -EINVAL; goto err_unroll; } /* Applicable only for ROOT_RECIPE, set the fwd_priority for @@ -6526,7 +7311,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, sizeof(*last_chain_entry), GFP_KERNEL); if (!last_chain_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll; } last_chain_entry->rid = rid; @@ -6550,7 +7335,6 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, */ last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; list_for_each_entry(entry, &rm->rg_list, l_entry) { - last_chain_entry->fv_idx[i] = entry->chain_idx; buf[recps].content.lkup_indx[i] = entry->chain_idx; buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF); set_bit(entry->rid, rm->r_bitmap); @@ -6561,20 +7345,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, sizeof(buf[recps].recipe_bitmap)); } else { - status = ICE_ERR_BAD_PTR; + status = -EINVAL; goto err_unroll; } buf[recps].content.act_ctrl_fwd_priority = rm->priority; - /* To differentiate among different UDP tunnels, a meta data ID - * flag is used. 
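The new ice_set_recipe_index() helper above replaces the old set_bit() call that cast recipe_bitmap to unsigned long; it addresses the 8-byte bitmap one byte at a time. The stand-alone copy below reproduces the same arithmetic so it can be exercised outside the driver.

#include <stdio.h>
#include <string.h>

#define BITS_PER_BYTE 8

/* Same byte/bit split as the new ice_set_recipe_index(); indexes beyond
 * the 8-byte recipe bitmap are silently ignored.
 */
static void model_set_recipe_index(unsigned long idx, unsigned char *bitmap)
{
        unsigned long byte = idx / BITS_PER_BYTE;
        unsigned long bit = idx % BITS_PER_BYTE;

        if (byte >= 8)
                return;

        bitmap[byte] |= 1u << bit;
}

int main(void)
{
        unsigned char bitmap[8];
        size_t i;

        memset(bitmap, 0, sizeof(bitmap));
        model_set_recipe_index(0, bitmap);      /* byte 0, bit 0 */
        model_set_recipe_index(10, bitmap);     /* byte 1, bit 2 */
        model_set_recipe_index(200, bitmap);    /* out of range, ignored */

        for (i = 0; i < sizeof(bitmap); i++)
                printf("%02x ", bitmap[i]);
        printf("\n");
        return 0;
}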
- */ - if (match_tun_mask) { - buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND; - buf[recps].content.mask[i] = - cpu_to_le16(match_tun_mask); - } - recps++; rm->root_rid = (u8)rid; } @@ -6604,7 +7379,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, } if (!idx_found) { - status = ICE_ERR_OUT_OF_RANGE; + status = -EIO; goto err_unroll; } @@ -6612,7 +7387,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, is_root = (rm->root_rid == entry->rid); recp->is_root = is_root; - recp->root_rid = entry->rid; + recp->root_rid = (u8)entry->rid; recp->big_recp = (is_root && rm->n_grp_count > 1); memcpy(&recp->ext_words, entry->r_group.pairs, @@ -6657,12 +7432,12 @@ err_mem: * @rm: recipe management list entry * @lkup_exts: lookup elements */ -static enum ice_status +static int ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, struct ice_prot_lkup_ext *lkup_exts) { - enum ice_status status; u8 recp_count = 0; + int status; rm->n_grp_count = 0; @@ -6688,45 +7463,27 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, * @hw: pointer to hardware structure * @lkups: lookup elements or match criteria for the advanced recipe, one * structure per protocol header - * @lkups_cnt: number of protocols * @bm: bitmap of field vectors to consider * @fv_list: pointer to a list that holds the returned field vectors */ -static enum ice_status -ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, +static int +ice_get_fv(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, unsigned long *bm, struct list_head *fv_list) { - enum ice_status status; - u8 *prot_ids; - u16 i; - - prot_ids = devm_kcalloc(ice_hw_to_dev(hw), lkups_cnt, - sizeof(*prot_ids), GFP_KERNEL); - if (!prot_ids) - return ICE_ERR_NO_MEMORY; - - for (i = 0; i < lkups_cnt; i++) - if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) { - status = ICE_ERR_CFG; - goto free_mem; - } - /* Find field vectors that include all specified protocol types */ - status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list); - -free_mem: - devm_kfree(ice_hw_to_dev(hw), prot_ids); - return status; + return ice_get_sw_fv_list(hw, lkups, bm, fv_list); } /** * ice_tun_type_match_word - determine if tun type needs a match mask - * @tun_type: tunnel type + * @rinfo: other information regarding the rule e.g. 
priority and action info + * @off: offset of packet flag * @mask: mask to be used for the tunnel */ -static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) +static bool +ice_tun_type_match_word(struct ice_adv_rule_info *rinfo, u16 *off, u16 *mask) { - switch (tun_type) { + switch (rinfo->tun_type) { case ICE_SW_TUN_VXLAN_GPE: case ICE_SW_TUN_GENEVE: case ICE_SW_TUN_VXLAN: @@ -6754,16 +7511,31 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) case ICE_SW_TUN_GTP_IPV4_UDP: case ICE_SW_TUN_GTP_IPV6_TCP: case ICE_SW_TUN_GTP_IPV6_UDP: + case ICE_SW_TUN_GTPU: + case ICE_SW_TUN_GTPC: *mask = ICE_TUN_FLAG_MASK; + *off = ICE_TUN_FLAG_MDID_OFF(1); return true; + case ICE_SW_TUN_AND_NON_TUN: + if (rinfo->add_dir_lkup) { + *mask = ICE_DIR_FLAG_MASK; + *off = ICE_TUN_FLAG_MDID_OFF(0); + return true; + } + *mask = 0; + *off = 0; + return false; + case ICE_SW_TUN_GENEVE_VLAN: case ICE_SW_TUN_VXLAN_VLAN: *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK; + *off = ICE_TUN_FLAG_MDID_OFF(1); return true; default: *mask = 0; + *off = 0; return false; } } @@ -6773,24 +7545,26 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) * @rinfo: other information regarding the rule e.g. priority and action info * @lkup_exts: lookup word structure */ -static enum ice_status +static int ice_add_special_words(struct ice_adv_rule_info *rinfo, struct ice_prot_lkup_ext *lkup_exts) { u16 mask; + u16 off; /* If this is a tunneled packet, then add recipe index to match the - * tunnel bit in the packet metadata flags. + * tunnel bit in the packet metadata flags. If this is a tun_and_non_tun + * packet, then add recipe index to match the direction bit in the flag. */ - if (ice_tun_type_match_word(rinfo->tun_type, &mask)) { + if (ice_tun_type_match_word(rinfo, &off, &mask)) { if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) { u8 word = lkup_exts->n_val_words++; lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW; - lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF; + lkup_exts->fv_words[word].off = off; lkup_exts->field_mask[word] = mask; } else { - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; } } @@ -6847,10 +7621,28 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, case ICE_SW_TUN_GTP_IPV6_UDP: prof_type = ICE_PROF_TUN_UDP; break; + case ICE_SW_TUN_GTPU: + prof_type = ICE_PROF_TUN_GTPU; + break; + case ICE_SW_TUN_GTPC: + prof_type = ICE_PROF_TUN_GTPC; + break; case ICE_SW_TUN_NVGRE: prof_type = ICE_PROF_TUN_GRE; break; + case ICE_SW_IPV4_TCP: + set_bit(ICE_PROFID_IPV4_TCP, bm); + return; + case ICE_SW_IPV4_UDP: + set_bit(ICE_PROFID_IPV4_UDP, bm); + return; + case ICE_SW_IPV6_TCP: + set_bit(ICE_PROFID_IPV6_TCP, bm); + return; + case ICE_SW_IPV6_UDP: + set_bit(ICE_PROFID_IPV6_UDP, bm); + return; case ICE_SW_TUN_AND_NON_TUN: default: prof_type = ICE_PROF_ALL; @@ -6869,7 +7661,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, * @rinfo: other information regarding the rule e.g. 
priority and action info * @rid: return the recipe ID of the recipe created */ -static enum ice_status +int ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid) { @@ -6880,19 +7672,17 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_sw_fv_list_entry *fvit; struct ice_recp_grp_entry *r_tmp; struct ice_sw_fv_list_entry *tmp; - enum ice_status status = 0; struct ice_sw_recipe *rm; - u16 match_tun_mask = 0; - u16 mask; + int status = 0; u8 i; if (!lkups_cnt) - return ICE_ERR_PARAM; + return -EINVAL; lkup_exts = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*lkup_exts), GFP_KERNEL); if (!lkup_exts) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Determine the number of words to be matched and if it exceeds a * recipe's restrictions @@ -6901,20 +7691,20 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 count; if (lkups[i].type >= ICE_PROTOCOL_LAST) { - status = ICE_ERR_CFG; + status = -EIO; goto err_free_lkup_exts; } count = ice_fill_valid_words(&lkups[i], lkup_exts); if (!count) { - status = ICE_ERR_CFG; + status = -EIO; goto err_free_lkup_exts; } } rm = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rm), GFP_KERNEL); if (!rm) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_free_lkup_exts; } @@ -6930,10 +7720,16 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, */ ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap); - status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list); + status = ice_get_fv(hw, lkup_exts, fv_bitmap, &rm->fv_list); if (status) goto err_unroll; + /* Create any special protocol/offset pairs, such as looking at tunnel + * bits by extracting metadata + */ + status = ice_add_special_words(rinfo, lkup_exts); + if (status) + goto err_free_lkup_exts; /* Group match words into recipes using preferred recipe grouping * criteria. @@ -6942,14 +7738,6 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) goto err_unroll; - /* For certain tunnel types it is necessary to use a metadata ID flag to - * differentiate different tunnel types. A separate recipe needs to be - * used for the metadata. 
- */ - if (ice_tun_type_match_word(rinfo->tun_type, &mask) && - rm->n_grp_count > 1) - match_tun_mask = mask; - /* set the recipe priority if specified */ rm->priority = (u8)rinfo->priority; @@ -6960,6 +7748,24 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) goto err_unroll; + /* An empty FV list means to use all the profiles returned in the + * profile bitmap + */ + if (list_empty(&rm->fv_list)) { + u16 j; + + for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) { + struct ice_sw_fv_list_entry *fvl; + + fvl = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fvl), + GFP_KERNEL); + if (!fvl) + goto err_unroll; + fvl->fv_ptr = NULL; + fvl->profile_id = j; + list_add(&fvl->list_entry, &rm->fv_list); + } + } /* get bitmap of all profiles the recipe will be associated with */ bitmap_zero(profiles, ICE_MAX_NUM_PROFILES); @@ -6968,21 +7774,15 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, set_bit((u16)fvit->profile_id, profiles); } - /* Create any special protocol/offset pairs, such as looking at tunnel - * bits by extracting metadata - */ - status = ice_add_special_words(rinfo, lkup_exts); - if (status) - goto err_free_lkup_exts; - /* Look for a recipe which matches our requested fv / mask list */ - *rid = ice_find_recp(hw, lkup_exts); + *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type, rinfo->priority); if (*rid < ICE_MAX_NUM_RECIPES) /* Success if found a recipe that match the existing criteria */ goto err_unroll; + rm->tun_type = rinfo->tun_type; /* Recipe we need does not exist, add a recipe */ - status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles); + status = ice_add_sw_recipe(hw, rm, profiles); if (status) goto err_unroll; @@ -7057,13 +7857,16 @@ err_free_lkup_exts: * @pkt_len: packet length of dummy packet * @offsets: pointer to receive the pointer to the offsets for the packet */ -static void +void ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, enum ice_sw_tunnel_type tun_type, const u8 **pkt, u16 *pkt_len, const struct ice_dummy_pkt_offsets **offsets) { - bool tcp = false, udp = false, ipv6 = false, vlan = false; + bool tcp = false, udp = false, outer_ipv6 = false, vlan = false; + bool inner_ipv6 = false, pppoe = false; + bool gtp_no_pay = false; + bool cvlan = false; u16 i; for (i = 0; i < lkups_cnt; i++) { @@ -7072,9 +7875,107 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, else if (lkups[i].type == ICE_TCP_IL) tcp = true; else if (lkups[i].type == ICE_IPV6_OFOS) - ipv6 = true; - else if (lkups[i].type == ICE_VLAN_OFOS) + outer_ipv6 = true; + else if (lkups[i].type == ICE_VLAN_OFOS || + lkups[i].type == ICE_VLAN_EX) vlan = true; + + else if (lkups[i].type == ICE_VLAN_IN) + cvlan = true; + else if (lkups[i].type == ICE_ETYPE_OL && + lkups[i].h_u.ethertype.ethtype_id == + cpu_to_be16(ICE_IPV6_ETHER_ID) && + lkups[i].m_u.ethertype.ethtype_id == + cpu_to_be16(0xFFFF)) + outer_ipv6 = true; + else if (lkups[i].type == ICE_ETYPE_IL && + lkups[i].h_u.ethertype.ethtype_id == + cpu_to_be16(ICE_IPV6_ETHER_ID) && + lkups[i].m_u.ethertype.ethtype_id == + cpu_to_be16(0xFFFF)) + inner_ipv6 = true; + else if (lkups[i].type == ICE_PPPOE) { + pppoe = true; + if (lkups[i].h_u.pppoe_hdr.ppp_prot_id == + cpu_to_be16(ICE_PPP_IPV6_PROTO_ID) && + lkups[i].m_u.pppoe_hdr.ppp_prot_id == + cpu_to_be16(0xFFFF)) + outer_ipv6 = true; + } + else if (lkups[i].type == ICE_IPV6_IL) + inner_ipv6 = true; + else if (lkups[i].type == ICE_GTP_NO_PAY) + gtp_no_pay = true; + } + + if (cvlan && vlan) { + if 
(outer_ipv6) { + if (tcp) { + *pkt = dummy_qinq_ipv6_tcp_pkt; + *pkt_len = sizeof(dummy_qinq_ipv6_tcp_pkt); + *offsets = dummy_qinq_ipv6_tcp_packet_offsets; + return; + } + + if (udp) { + *pkt = dummy_qinq_ipv6_udp_pkt; + *pkt_len = sizeof(dummy_qinq_ipv6_udp_pkt); + *offsets = dummy_qinq_ipv6_udp_packet_offsets; + return; + } + + *pkt = dummy_qinq_ipv6_pkt; + *pkt_len = sizeof(dummy_qinq_ipv6_pkt); + *offsets = dummy_qinq_ipv6_packet_offsets; + return; + } else { + if (tcp) { + *pkt = dummy_qinq_ipv4_tcp_pkt; + *pkt_len = sizeof(dummy_qinq_ipv4_tcp_pkt); + *offsets = dummy_qinq_ipv4_tcp_packet_offsets; + return; + } + + if (udp) { + *pkt = dummy_qinq_ipv4_udp_pkt; + *pkt_len = sizeof(dummy_qinq_ipv4_udp_pkt); + *offsets = dummy_qinq_ipv4_udp_packet_offsets; + return; + } + + *pkt = dummy_qinq_ipv4_pkt; + *pkt_len = sizeof(dummy_qinq_ipv4_pkt); + *offsets = dummy_qinq_ipv4_packet_offsets; + return; + } + } + + if (tun_type == ICE_SW_IPV4_TCP) { + *pkt = dummy_tcp_packet; + *pkt_len = sizeof(dummy_tcp_packet); + *offsets = dummy_tcp_packet_offsets; + return; + } + + if (tun_type == ICE_SW_IPV4_UDP) { + *pkt = dummy_udp_packet; + *pkt_len = sizeof(dummy_udp_packet); + *offsets = dummy_udp_packet_offsets; + return; + } + + if (tun_type == ICE_SW_IPV6_TCP) { + *pkt = dummy_tcp_ipv6_packet; + *pkt_len = sizeof(dummy_tcp_ipv6_packet); + *offsets = dummy_tcp_ipv6_packet_offsets; + return; + } + + if (tun_type == ICE_SW_IPV6_UDP) { + *pkt = dummy_udp_ipv6_packet; + *pkt_len = sizeof(dummy_udp_ipv6_packet); + *offsets = dummy_udp_ipv6_packet_offsets; + return; } /* figure out which dummy packet and dummy offset to use if user @@ -7170,6 +8071,86 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, return; } + if (tun_type == ICE_SW_TUN_GTPU) { + if (outer_ipv6) { + if (gtp_no_pay) { + *pkt = dummy_ipv6_gtp_packet; + *pkt_len = sizeof(dummy_ipv6_gtp_packet); + *offsets = dummy_ipv6_gtp_no_pay_packet_offsets; + return; + } + if (inner_ipv6 && udp) { + *pkt = dummy_ipv6_gtpu_ipv6_udp_packet; + *pkt_len = + sizeof(dummy_ipv6_gtpu_ipv6_udp_packet); + *offsets = + dummy_ipv6_gtpu_ipv6_udp_packet_offsets; + return; + } + if (inner_ipv6) { + *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet; + *pkt_len = + sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet); + *offsets = + dummy_ipv6_gtpu_ipv6_tcp_packet_offsets; + return; + } + if (udp) { + *pkt = dummy_ipv6_gtpu_ipv4_udp_packet; + *pkt_len = + sizeof(dummy_ipv6_gtpu_ipv4_udp_packet); + *offsets = + dummy_ipv6_gtpu_ipv4_udp_packet_offsets; + return; + } + *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet; + *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet); + *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets; + return; + } + if (gtp_no_pay) { + *pkt = dummy_ipv4_gtpu_ipv4_packet; + *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet); + *offsets = dummy_ipv4_gtp_no_pay_packet_offsets; + return; + } + if (inner_ipv6 && udp) { + *pkt = dummy_ipv4_gtpu_ipv6_udp_packet; + *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet); + *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets; + return; + } + if (inner_ipv6) { + *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet; + *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet); + *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets; + return; + } + if (udp) { + *pkt = dummy_ipv4_gtpu_ipv4_udp_packet; + *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet); + *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets; + return; + } + *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet; + *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet); + *offsets = 
dummy_ipv4_gtpu_ipv4_tcp_packet_offsets; + return; + } + + if (tun_type == ICE_SW_TUN_GTPC) { + if (outer_ipv6) { + *pkt = dummy_ipv6_gtp_packet; + *pkt_len = sizeof(dummy_ipv6_gtp_packet); + *offsets = dummy_ipv6_gtp_no_pay_packet_offsets; + return; + } + *pkt = dummy_ipv4_gtpu_ipv4_packet; + *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet); + *offsets = dummy_ipv4_gtp_no_pay_packet_offsets; + return; + } + if (tun_type == ICE_ALL_TUNNELS) { *pkt = dummy_gre_udp_packet; *pkt_len = sizeof(dummy_gre_udp_packet); @@ -7178,6 +8159,13 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, } if (tun_type == ICE_SW_TUN_NVGRE) { + if (tcp && inner_ipv6) { + *pkt = dummy_gre_ipv6_tcp_packet; + *pkt_len = sizeof(dummy_gre_ipv6_tcp_packet); + *offsets = dummy_gre_ipv6_tcp_packet_offsets; + return; + } + if (tcp) { *pkt = dummy_gre_tcp_packet; *pkt_len = sizeof(dummy_gre_tcp_packet); @@ -7185,6 +8173,13 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, return; } + if (inner_ipv6) { + *pkt = dummy_gre_ipv6_udp_packet; + *pkt_len = sizeof(dummy_gre_ipv6_udp_packet); + *offsets = dummy_gre_ipv6_udp_packet_offsets; + return; + } + *pkt = dummy_gre_udp_packet; *pkt_len = sizeof(dummy_gre_udp_packet); *offsets = dummy_gre_udp_packet_offsets; @@ -7195,6 +8190,13 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP || tun_type == ICE_SW_TUN_GENEVE_VLAN || tun_type == ICE_SW_TUN_VXLAN_VLAN) { + if (tcp && inner_ipv6) { + *pkt = dummy_udp_tun_ipv6_tcp_packet; + *pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet); + *offsets = dummy_udp_tun_ipv6_tcp_packet_offsets; + return; + } + if (tcp) { *pkt = dummy_udp_tun_tcp_packet; *pkt_len = sizeof(dummy_udp_tun_tcp_packet); @@ -7202,40 +8204,62 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, return; } + if (inner_ipv6) { + *pkt = dummy_udp_tun_ipv6_udp_packet; + *pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet); + *offsets = dummy_udp_tun_ipv6_udp_packet_offsets; + return; + } + *pkt = dummy_udp_tun_udp_packet; *pkt_len = sizeof(dummy_udp_tun_udp_packet); *offsets = dummy_udp_tun_udp_packet_offsets; return; } - if (udp && !ipv6) { + if (udp && !outer_ipv6) { if (vlan) { *pkt = dummy_vlan_udp_packet; *pkt_len = sizeof(dummy_vlan_udp_packet); *offsets = dummy_vlan_udp_packet_offsets; return; + } else if (pppoe) { + *pkt = dummy_pppoe_ipv4_udp_packet; + *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet); + *offsets = dummy_pppoe_ipv4_udp_packet_offsets; + return; } *pkt = dummy_udp_packet; *pkt_len = sizeof(dummy_udp_packet); *offsets = dummy_udp_packet_offsets; return; - } else if (udp && ipv6) { + } else if (udp && outer_ipv6) { if (vlan) { *pkt = dummy_vlan_udp_ipv6_packet; *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet); *offsets = dummy_vlan_udp_ipv6_packet_offsets; return; + } else if (pppoe) { + *pkt = dummy_pppoe_ipv6_udp_packet; + *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet); + *offsets = dummy_pppoe_ipv6_udp_packet_offsets; + return; } *pkt = dummy_udp_ipv6_packet; *pkt_len = sizeof(dummy_udp_ipv6_packet); *offsets = dummy_udp_ipv6_packet_offsets; return; - } else if ((tcp && ipv6) || ipv6) { + } else if ((tcp && outer_ipv6) || outer_ipv6) { if (vlan) { *pkt = dummy_vlan_tcp_ipv6_packet; *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet); *offsets = dummy_vlan_tcp_ipv6_packet_offsets; return; + } else if (pppoe) { + *pkt = dummy_pppoe_ipv6_tcp_packet; + *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet); + *offsets = 
dummy_pppoe_ipv6_tcp_packet_offsets; + return; } *pkt = dummy_tcp_ipv6_packet; *pkt_len = sizeof(dummy_tcp_ipv6_packet); @@ -7247,6 +8271,11 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, *pkt = dummy_vlan_tcp_packet; *pkt_len = sizeof(dummy_vlan_tcp_packet); *offsets = dummy_vlan_tcp_packet_offsets; + } else if (pppoe) { + *pkt = dummy_pppoe_ipv4_tcp_packet; + *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet); + *offsets = dummy_pppoe_ipv4_tcp_packet_offsets; + return; } else { *pkt = dummy_tcp_packet; *pkt_len = sizeof(dummy_tcp_packet); @@ -7265,7 +8294,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * @pkt_len: packet length of dummy packet * @offsets: offset info for the dummy packet */ -static enum ice_status +int ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_aqc_sw_rules_elem *s_rule, const u8 *dummy_pkt, u16 pkt_len, @@ -7299,7 +8328,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, } /* this should never happen in a correct calling sequence */ if (!found) - return ICE_ERR_PARAM; + return -EINVAL; switch (lkups[i].type) { case ICE_MAC_OFOS: @@ -7307,9 +8336,15 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, len = sizeof(struct ice_ether_hdr); break; case ICE_ETYPE_OL: + case ICE_ETYPE_IL: len = sizeof(struct ice_ethtype_hdr); break; + case ICE_PPPOE: + len = sizeof(struct ice_pppoe_hdr); + break; case ICE_VLAN_OFOS: + case ICE_VLAN_EX: + case ICE_VLAN_IN: len = sizeof(struct ice_vlan_hdr); break; case ICE_IPV4_OFOS: @@ -7338,15 +8373,16 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, break; case ICE_GTP: + case ICE_GTP_NO_PAY: len = sizeof(struct ice_udp_gtp_hdr); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } /* the length should be a word multiple */ if (len % ICE_BYTES_PER_WORD) - return ICE_ERR_CFG; + return -EIO; /* We have the offset to the header start, the length, the * caller's header values and mask. Use this information to @@ -7356,15 +8392,23 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * over any significant packet data. 
*/ for (j = 0; j < len / sizeof(u16); j++) +#ifdef __CHECKER__ /* cppcheck-suppress objectIndex */ +#endif /* __CHECKER__ */ if (((u16 *)&lkups[i].m_u)[j]) ((u16 *)(pkt + offset))[j] = (((u16 *)(pkt + offset))[j] & +#ifdef __CHECKER__ /* cppcheck-suppress objectIndex */ +#endif /* __CHECKER__ */ ~((u16 *)&lkups[i].m_u)[j]) | +#ifdef __CHECKER__ /* cppcheck-suppress objectIndex */ +#endif /* __CHECKER__ */ (((u16 *)&lkups[i].h_u)[j] & +#ifdef __CHECKER__ /* cppcheck-suppress objectIndex */ +#endif /* __CHECKER__ */ ((u16 *)&lkups[i].m_u)[j]); } @@ -7380,7 +8424,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * @pkt: dummy packet to fill in * @offsets: offset info for the dummy packet */ -static enum ice_status +static int ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, u8 *pkt, const struct ice_dummy_pkt_offsets *offsets) { @@ -7393,13 +8437,13 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, case ICE_SW_TUN_VXLAN_VLAN: case ICE_SW_TUN_UDP: if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port)) - return ICE_ERR_CFG; + return -EIO; break; case ICE_SW_TUN_GENEVE: case ICE_SW_TUN_GENEVE_VLAN: if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port)) - return ICE_ERR_CFG; + return -EIO; break; default: @@ -7421,7 +8465,7 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, } } - return ICE_ERR_CFG; + return -EIO; } /** @@ -7436,7 +8480,7 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, * Helper function to search for a given advance rule entry * Returns pointer to entry storing the rule if found */ -static struct ice_adv_fltr_mgmt_list_entry * +struct ice_adv_fltr_mgmt_list_entry * ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, u16 recp_id, struct ice_adv_rule_info *rinfo) @@ -7486,25 +8530,25 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, * Add the new VSI to the previously created VSI list set * using the update switch rule command */ -static enum ice_status +int ice_adv_add_update_vsi_list(struct ice_hw *hw, struct ice_adv_fltr_mgmt_list_entry *m_entry, struct ice_adv_rule_info *cur_fltr, struct ice_adv_rule_info *new_fltr) { - enum ice_status status; u16 vsi_list_id = 0; + int status; if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP || cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) && (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI || cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { /* Only one entry existed in the mapping and it was not already @@ -7517,7 +8561,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (cur_fltr->sw_act.fwd_id.hw_vsi_id == new_fltr->sw_act.fwd_id.hw_vsi_id) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle; vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle; @@ -7550,7 +8594,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, u16 vsi_handle = new_fltr->sw_act.vsi_handle; if (!m_entry->vsi_list_info) - return ICE_ERR_CFG; + return -EIO; /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, 
m_entry->vsi_list_info->vsi_map)) @@ -7592,7 +8636,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, * rinfo describes other information related to this rule such as forwarding * IDs, priority of this rule, etc. */ -enum ice_status +int ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo, struct ice_rule_query_data *added_entry) @@ -7603,10 +8647,10 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_aqc_sw_rules_elem *s_rule = NULL; struct list_head *rule_head; struct ice_switch_info *sw; - enum ice_status status; const u8 *pkt = NULL; u16 word_cnt; u32 act = 0; + int status; u8 q_rgn; /* Initialize profile to result index bitmap */ @@ -7616,7 +8660,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } if (!lkups_cnt) - return ICE_ERR_PARAM; + return -EINVAL; /* get # of words we need to match */ word_cnt = 0; @@ -7625,19 +8669,24 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, ptr = (u16 *)&lkups[i].m_u; for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++) +#ifdef __CHECKER__ /* cppcheck-suppress objectIndex */ +#endif /* __CHECKER__ */ if (ptr[j] != 0) word_cnt++; } - if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS) - return ICE_ERR_PARAM; + if (!word_cnt) + return -EINVAL; + + if (word_cnt > ICE_MAX_CHAIN_WORDS) + return -ENOSPC; /* make sure that we can locate a dummy packet */ ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len, &pkt_offsets); if (!pkt) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto err_ice_add_adv_rule; } @@ -7645,11 +8694,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) - return ICE_ERR_CFG; + return -EIO; vsi_handle = rinfo->sw_act.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) rinfo->sw_act.fwd_id.hw_vsi_id = @@ -7683,8 +8732,13 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len; s_rule = devm_kzalloc(ice_hw_to_dev(hw), rule_buf_sz, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; - act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE; + return -ENOMEM; + if (!rinfo->flags_info.act_valid) + act |= ICE_SINGLE_ACT_LAN_ENABLE; + else + act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE | + ICE_SINGLE_ACT_LB_ENABLE); + switch (rinfo->sw_act.fltr_act) { case ICE_FWD_TO_VSI: act |= (rinfo->sw_act.fwd_id.hw_vsi_id << @@ -7710,7 +8764,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, ICE_SINGLE_ACT_VALID_BIT; break; default: - status = ICE_ERR_CFG; + status = -EIO; goto err_ice_add_adv_rule; } @@ -7756,14 +8810,19 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, sizeof(struct ice_adv_fltr_mgmt_list_entry), GFP_KERNEL); if (!adv_fltr) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_adv_rule; } - adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups, - lkups_cnt * sizeof(*lkups), GFP_KERNEL); + if (lkups_cnt) { + adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups, + lkups_cnt * sizeof(*lkups), + GFP_KERNEL); + } else { + adv_fltr->lkups = NULL; + } if (!adv_fltr->lkups) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_adv_rule; } @@ -7803,22 +8862,22 @@ err_ice_add_adv_rule: 
* @fm_list: filter management entry for which the VSI list management needs to * be done */ -static enum ice_status +static int ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, struct ice_adv_fltr_mgmt_list_entry *fm_list) { struct ice_vsi_list_map_info *vsi_list_info; enum ice_sw_lkup_type lkup_type; - enum ice_status status; u16 vsi_list_id; + int status; if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST || fm_list->vsi_count == 0) - return ICE_ERR_PARAM; + return -EINVAL; /* A rule with the VSI being removed does not exist */ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; lkup_type = ICE_SW_LKUP_LAST; vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id; @@ -7838,7 +8897,7 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, ICE_MAX_VSI); if (!ice_is_vsi_valid(hw, rem_vsi_handle)) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* Make sure VSI list is empty before removing it below */ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, @@ -7902,27 +8961,27 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, * header. rinfo describes other information related to this rule such as * forwarding IDs, priority of this rule, etc. */ -enum ice_status +int ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo) { struct ice_adv_fltr_mgmt_list_entry *list_elem; struct ice_prot_lkup_ext lkup_exts; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; bool remove_rule = false; u16 i, rid, vsi_handle; + int status = 0; memset(&lkup_exts, 0, sizeof(lkup_exts)); for (i = 0; i < lkups_cnt; i++) { u16 count; if (lkups[i].type >= ICE_PROTOCOL_LAST) - return ICE_ERR_CFG; + return -EIO; count = ice_fill_valid_words(&lkups[i], &lkup_exts); if (!count) - return ICE_ERR_CFG; + return -EIO; } /* Create any special protocol/offset pairs, such as looking at tunnel @@ -7932,10 +8991,10 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) return status; - rid = ice_find_recp(hw, &lkup_exts); + rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type, rinfo->priority); /* If did not find a recipe that match the existing criteria */ if (rid == ICE_MAX_NUM_RECIPES) - return ICE_ERR_PARAM; + return -EINVAL; rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock; list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); @@ -7968,7 +9027,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, s_rule = devm_kzalloc(ice_hw_to_dev(hw), rule_buf_sz, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; s_rule->pdata.lkup_tx_rx.act = 0; s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(list_elem->rule_info.fltr_rule_id); @@ -7976,7 +9035,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, rule_buf_sz, 1, ice_aqc_opc_remove_sw_rules, NULL); - if (!status || status == ICE_ERR_DOES_NOT_EXIST) { + if (!status || status == -ENOENT) { struct ice_switch_info *sw = hw->switch_info; mutex_lock(rule_lock); @@ -8001,7 +9060,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, * the remove_entry parameter. 
This function will remove rule for a given * vsi_handle with a given rule_id which is passed as parameter in remove_entry */ -enum ice_status +int ice_rem_adv_rule_by_id(struct ice_hw *hw, struct ice_rule_query_data *remove_entry) { @@ -8012,7 +9071,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, sw = hw->switch_info; if (!sw->recp_list[remove_entry->rid].recp_created) - return ICE_ERR_PARAM; + return -EINVAL; list_head = &sw->recp_list[remove_entry->rid].filt_rules; list_for_each_entry(list_itr, list_head, list_entry) { if (list_itr->rule_info.fltr_rule_id == @@ -8024,7 +9083,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, } } /* either list is empty or unable to find rule */ - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -8035,16 +9094,16 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, * * This function is used to remove all the rules for a given VSI and as soon * as removing a rule fails, it will return immediately with the error code, - * else it will return ICE_SUCCESS + * else it will return 0 */ -enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle) +int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle) { struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry; struct ice_vsi_list_map_info *map_info; struct list_head *list_head; struct ice_adv_rule_info rinfo; struct ice_switch_info *sw; - enum ice_status status; + int status; u8 rid; sw = hw->switch_info; @@ -8081,7 +9140,6 @@ enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle) return 0; } - /** * ice_replay_vsi_fltr - Replay filters for requested VSI * @hw: pointer to the hardware structure @@ -8094,14 +9152,14 @@ enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle) * Replays the filter of recipe recp_id for a VSI represented via vsi_handle. * It is required to pass valid VSI handle. */ -static enum ice_status +static int ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi, struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id, struct list_head *list_head) { struct ice_fltr_mgmt_list_entry *itr; - enum ice_status status = 0; struct ice_sw_recipe *recp_list; + int status = 0; u16 hw_vsi_id; if (list_empty(list_head)) @@ -8156,13 +9214,13 @@ end: * * Replay the advanced rule for the given VSI. */ -static enum ice_status +static int ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle, struct list_head *list_head) { struct ice_rule_query_data added_entry = { 0 }; struct ice_adv_fltr_mgmt_list_entry *adv_fltr; - enum ice_status status = 0; + int status = 0; if (list_empty(list_head)) return status; @@ -8188,14 +9246,16 @@ ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle, * * Replays filters for requested VSI via vsi_handle. 
*/ -enum ice_status +int ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi, u16 vsi_handle) { - struct ice_switch_info *sw = hw->switch_info; - enum ice_status status; + struct ice_switch_info *sw; + int status; u8 i; + sw = hw->switch_info; + /* Update the recipes that were created */ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { struct list_head *head; diff --git a/drivers/thirdparty/ice/ice_switch.h b/drivers/thirdparty/ice/ice_switch.h index adc07b10c1dc..4f2e2e5289f8 100644 --- a/drivers/thirdparty/ice/ice_switch.h +++ b/drivers/thirdparty/ice/ice_switch.h @@ -4,7 +4,7 @@ #ifndef _ICE_SWITCH_H_ #define _ICE_SWITCH_H_ -#include "ice_common.h" +#include "ice_type.h" #include "ice_protocol_type.h" #define ICE_SW_CFG_MAX_BUF_LEN 2048 @@ -14,6 +14,18 @@ #define ICE_FLTR_TX BIT(1) #define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX) +/* Switch Profile IDs for Profile related switch rules */ +#define ICE_PROFID_IPV4_TCP 4 +#define ICE_PROFID_IPV4_UDP 5 +#define ICE_PROFID_IPV6_TCP 7 +#define ICE_PROFID_IPV6_UDP 8 +#define ICE_PROFID_IPV4_GTPC_TEID 41 +#define ICE_PROFID_IPV4_GTPC_NO_TEID 42 +#define ICE_PROFID_IPV4_GTPU_TEID 43 +#define ICE_PROFID_IPV6_GTPC_TEID 44 +#define ICE_PROFID_IPV6_GTPC_NO_TEID 45 +#define ICE_PROFID_IPV6_GTPU_TEID 46 +#define ICE_PROFID_IPV6_GTPU_IPV6_TCP 70 #define DUMMY_ETH_HDR_LEN 16 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \ @@ -29,7 +41,6 @@ (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \ ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0]))) - /* Worst case buffer length for ice_aqc_opc_get_res_alloc */ #define ICE_MAX_RES_TYPES 0x80 #define ICE_AQ_GET_RES_ALLOC_BUF_LEN \ @@ -168,7 +179,6 @@ struct ice_adv_lkup_elem { union ice_prot_hdr m_u; /* Mask of header values to match */ }; - struct ice_sw_act_ctrl { /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */ u16 src; @@ -198,12 +208,26 @@ struct ice_rule_query_data { u16 vsi_handle; }; +/* + * This structure allows to pass info about lb_en and lan_en + * flags to ice_add_adv_rule. Values in act would be used + * only if act_valid was set to true, otherwise dflt + * values would be used. + */ +struct ice_adv_rule_flags_info { + u32 act; + u8 act_valid; /* indicate if flags in act are valid */ +}; + struct ice_adv_rule_info { enum ice_sw_tunnel_type tun_type; struct ice_sw_act_ctrl sw_act; u32 priority; u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */ + u8 add_dir_lkup; u16 fltr_rule_id; + u16 lg_id; + struct ice_adv_rule_flags_info flags_info; }; /* A collection of one or more four word recipe */ @@ -283,11 +307,10 @@ struct ice_vsi_list_map_info { struct ice_fltr_list_entry { struct list_head list_entry; - enum ice_status status; + int status; struct ice_fltr_info fltr_info; }; - /* This defines an entry in the list that maintains MAC or VLAN membership * to HW list mapping, since multiple VSIs can subscribe to the same MAC or * VLAN.
As an optimization the VSI list should be created only when a @@ -329,166 +352,203 @@ enum ice_promisc_flags { ICE_PROMISC_VLAN_TX = 0x80, }; +struct ice_dummy_pkt_offsets { + enum ice_protocol_type type; + u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */ +}; + +void +ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + enum ice_sw_tunnel_type tun_type, const u8 **pkt, + u16 *pkt_len, + const struct ice_dummy_pkt_offsets **offsets); + +int +ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + struct ice_aqc_sw_rules_elem *s_rule, + const u8 *dummy_pkt, u16 pkt_len, + const struct ice_dummy_pkt_offsets *offsets); + +int +ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid); + +struct ice_adv_fltr_mgmt_list_entry * +ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, u16 recp_id, + struct ice_adv_rule_info *rinfo); + +int +ice_adv_add_update_vsi_list(struct ice_hw *hw, + struct ice_adv_fltr_mgmt_list_entry *m_entry, + struct ice_adv_rule_info *cur_fltr, + struct ice_adv_rule_info *new_fltr); + +struct ice_vsi_list_map_info * +ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle, + u16 *vsi_list_id); + /* VSI related commands */ -enum ice_status +int ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); -enum ice_status +int ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd); -enum ice_status +int ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle); void ice_clear_all_vsi_ctx(struct ice_hw *hw); -enum ice_status +int ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi, u16 count, struct ice_mir_rule_buf *mr_buf, struct ice_sq_cd *cd, u16 *rule_id); -enum ice_status +int ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh, u32 *ctl_bitmask); -enum ice_status +int ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh, u32 ctl_bitmask); /* Switch config */ -enum ice_status +int ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf, u16 buf_size, u16 *req_desc, u16 *num_elems, struct ice_sq_cd *cd); -enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); +int ice_get_initial_sw_cfg(struct ice_hw *hw); -enum ice_status +int ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id); -enum ice_status +int ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id); -enum ice_status +int ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 *counter_id); -enum ice_status +int ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id); -enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); -enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id); -enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id); -enum ice_status +int ice_update_sw_rule_bridge_mode(struct ice_hw *hw); +int ice_alloc_rss_global_lut(struct ice_hw 
*hw, bool shared_res, u16 *global_lut_id); +int ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id); +int ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id, u16 *counter_id); -enum ice_status +int ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id); -enum ice_status +int ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, struct ice_aqc_get_res_resp_elem *buf, u16 buf_size, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries, struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id, struct ice_sq_cd *cd); -enum ice_status +int ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); -enum ice_status +int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list); void ice_rem_all_sw_rules_info(struct ice_hw *hw); -enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); -enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); +int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); +int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle); bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle); -enum ice_status +int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list); -enum ice_status +int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list); void ice_dump_sw_rules(struct ice_hw *hw, enum ice_sw_lkup_type lookup); -enum ice_status +int ice_cfg_iwarp_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable); -enum ice_status +int ice_add_mac_vlan(struct ice_hw *hw, struct list_head *m_list); -enum ice_status +int ice_remove_mac_vlan(struct ice_hw *hw, struct list_head *v_list); -enum ice_status +int ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info, u16 sw_marker); -enum ice_status +int ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info); void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); - /* Promisc/defport setup for VSIs */ -enum ice_status +int ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set, u8 direction); -enum ice_status +bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, + bool *rule_exists); +int ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid); -enum ice_status +int ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid); -enum ice_status +int ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc); /* Get VSIs Promisc/defport settings */ -enum ice_status +int ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, u16 *vid); -enum ice_status +int ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, u16 *vid); -enum ice_status +int ice_aq_add_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 num_recipes, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, struct ice_sq_cd *cd); -enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *recipe_id); -enum ice_status +int 
ice_alloc_recipe(struct ice_hw *hw, u16 *recipe_id); +int ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo, struct ice_rule_query_data *added_entry); -enum ice_status +int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle); -enum ice_status +int ice_rem_adv_rule_by_id(struct ice_hw *hw, struct ice_rule_query_data *remove_entry); -enum ice_status +int ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo); -enum ice_status ice_dump_sw_cfg(struct ice_hw *hw); +int ice_dump_sw_cfg(struct ice_hw *hw); -enum ice_status +int ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list); u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle); bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); -enum ice_status +int ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi, u16 vsi_handle); void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw); void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); -enum ice_status +int ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd); -enum ice_status +int ice_update_recipe_lkup_idx(struct ice_hw *hw, struct ice_update_recipe_lkup_idx_params *params); void ice_change_proto_id_to_dvm(void); diff --git a/drivers/thirdparty/ice/ice_tc_lib.c b/drivers/thirdparty/ice/ice_tc_lib.c index 62e6546ceeda..98a4b4932d9b 100644 --- a/drivers/thirdparty/ice/ice_tc_lib.c +++ b/drivers/thirdparty/ice/ice_tc_lib.c @@ -6,344 +6,11 @@ #include "ice_lib.h" #include "ice_fltr.h" +#ifdef HAVE_GRETAP_TYPE +#include +#endif /* HAVE_GRETAP_TYPE */ + #ifdef HAVE_TC_SETUP_CLSFLOWER -/** - * ice_detect_filter_conflict - detect filter conflict across TC - * @pf: Pointer to PF structure - * @tc_fltr: Pointer to TC flower filter structure - * - * This function detects filter mismatch type but using same port_number - * across TC and allow/deny desired filter combination. Example is, - * filter 1, dest_ip + dest_port (80) -> action is forward to TC 1 - * filter 2: dest_ip + src_port (80) -> action is forward to TC 2 - * - * We do not want to support such config, to avoid situation where - * packets are getting duplicated across both the TCs if incoming Rx - * packet has same dest_ip + src_port (80) + dst_port (80). - * Due to both filter being same high prio filter in HW, both rule - * can match (whereas that is not expectation) and cause unexpected - * packet mirroring. 
- */ -static int -ice_detect_filter_conflict(struct ice_pf *pf, - struct ice_tc_flower_fltr *tc_fltr) -{ - struct ice_tc_flower_lyr_2_4_hdrs *headers; - struct device *dev = ice_pf_to_dev(pf); - struct ice_tc_flower_fltr *fltr; - struct ice_tc_l4_hdr *l4_key; - u16 sport = 0, dport = 0; - - /* header = outer header for non-tunnel filter, - * otherwise inner_headers - */ - headers = &tc_fltr->outer_headers; - if (tc_fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) - headers = &tc_fltr->inner_headers; - - l4_key = &headers->l4_key; - if (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) - sport = be16_to_cpu(l4_key->src_port); - if (tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) - dport = be16_to_cpu(l4_key->dst_port); - - hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) { - struct ice_tc_flower_lyr_2_4_hdrs *fltr_headers; - struct ice_tc_l4_hdr *fltr_l4_key; - u16 dst_port = 0, src_port = 0; - - /* if tc_class is same, skip, no check needed */ - if (fltr->action.tc_class == tc_fltr->action.tc_class) - continue; - - /* if only either of them are set, skip it */ - if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) ^ - (tc_fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID)) - continue; - - /* if this is tunnel filter, make sure tunnel ID is not same */ - if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) && - (tc_fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID)) { - if (fltr->tenant_id && tc_fltr->tenant_id && - fltr->tenant_id == tc_fltr->tenant_id) { - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Unsupported filter combination across TC, filter exist with same tunnel key for other TC(see dmesg log)"); - dev_err(dev, "Unsupported filter combination across TC, TC %d has filter using same tunnel key (%u)\n", - fltr->action.tc_class, - be32_to_cpu(fltr->tenant_id)); - return -EOPNOTSUPP; - } - } - - fltr_headers = &fltr->outer_headers; - if (fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) - fltr_headers = &fltr->inner_headers; - - /* access L4 params */ - fltr_l4_key = &fltr_headers->l4_key; - if (fltr->flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) - dst_port = be16_to_cpu(fltr_l4_key->dst_port); - if (fltr->flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) - src_port = be16_to_cpu(fltr_l4_key->src_port); - - /* proceed only if tc_class is different and filter types - * are different but actual value(s) of say port number are - * same, flag warning to user. - * e.g if filter one is like dest port = 80 -> tc_class(1) - * and second filter is like, src_port = 80 -> tc_class(2) - * Invariably packet can match both the filter and user - * will get expected packet mirroring to both the destination - * (means tc_class(1) and tc_class(2)). To avoid such - * behavior, block user from adding such conficting filter - */ - if (tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) { - if (dport && dst_port && dport == dst_port) { - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Unsupported filter combination across TC, filter exist with same destination port for other TC, as destination port based filter(see dmesg log)"); - dev_err(dev, "Unsupported filter combination across TC, TC %d has filter using same port number (%u) as destination port based filter. 
This is to avoid unexpected packet mirroring.\n", - fltr->action.tc_class, dst_port); - return -EOPNOTSUPP; - } - if (dport && src_port && dport == src_port) { - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Unsupported filter combination across TC, filter exist with same destination port for other TC, as source port based filter(see dmesg log)"); - dev_err(dev, "Unsupported filter combination across TC, TC %d has filter using same port number (%u) as source port based filter. This is to avoid unexpected packet mirroring.\n", - fltr->action.tc_class, src_port); - return -EOPNOTSUPP; - } - } - - if (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) { - if (sport && dst_port && sport == dst_port) { - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Unsupported filter combination across TC, filter exist with same source port for other TC, as destination port based filter (see dmesg log)"); - dev_err(dev, "Unsupported filter combination across TC, TC %d has filter using same port number (%u) as destination port based filter. This is to avoid unexpected packet mirroring.\n", - fltr->action.tc_class, dst_port); - return -EOPNOTSUPP; - } - if (sport && src_port && sport == src_port) { - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Unsupported filter combination across TC, filter exist with same source port for other TC, as source port based filter (see dmesg log)"); - dev_err(dev, "Unsupported filter combination across TC, TC %d has filter using same port number (%u) as source port based filter. This is to avoid unexpected packet mirroring.\n", - fltr->action.tc_class, src_port); - return -EOPNOTSUPP; - } - } - } - - return 0; -} - -/** - * ice_chnl_fltr_type_chk - filter type check - * @pf: Pointer to PF - * @tc_fltr: Pointer to TC flower filter structure - * @final_fltr_type: Ptr to filter type (dest/src/dest+src port) - * - * This function is used to determine if given filter (based on input params) - * should be allowed or not. For a given channel (aka ADQ VSI), supported - * filter types are src port, dest port , src+dest port. SO this function - * checks if any filter exist for specified channel (if so, channel specific - * filter_type will be set), and see if it matches with the filter being added. 
- * It returns 0 (upon success) or POSIX error code - */ -static int -ice_chnl_fltr_type_chk(struct ice_pf *pf, struct ice_tc_flower_fltr *tc_fltr, - enum ice_channel_fltr_type *final_fltr_type) -{ - enum ice_channel_fltr_type fltr_type = *final_fltr_type; - struct device *dev = ice_pf_to_dev(pf); - - if (fltr_type == ICE_CHNL_FLTR_TYPE_INVALID) { - /* L4 based filter, more granular, hence should be checked - * beore L3 - */ - if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) && - (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT)) - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_DEST_PORT; - else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) - fltr_type = ICE_CHNL_FLTR_TYPE_DEST_PORT; - else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_PORT; - /* L3 (IPv4) based filter check */ - else if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV4) && - (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV4)) - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_DEST_IPV4; - else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV4) - fltr_type = ICE_CHNL_FLTR_TYPE_DEST_IPV4; - else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV4) - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_IPV4; - /* L3 (IPv6) based filter check */ - else if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6) && - (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6)) - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_DEST_IPV6; - else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6) - fltr_type = ICE_CHNL_FLTR_TYPE_DEST_IPV6; - else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6) - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_IPV6; - /* Tunnel filter check, inner criteria is open: - * any combination of inner L3 and/or L4 - */ - else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) - fltr_type = ICE_CHNL_FLTR_TYPE_TENANT_ID; - else - return -EOPNOTSUPP; - } else if (fltr_type == ICE_CHNL_FLTR_TYPE_SRC_PORT) { - if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) && - (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT)) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from SRC_PORT to SRC + DEST_PORT\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_DEST_PORT; - } else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from SRC_PORT to DEST_PORT\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_DEST_PORT; - } - } else if (fltr_type == ICE_CHNL_FLTR_TYPE_DEST_PORT) { - if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) && - (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT)) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from DEST_PORT to SRC + DEST_PORT\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_DEST_PORT; - } else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from DEST_PORT to SRC_PORT\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_PORT; - } - } else if (fltr_type == ICE_CHNL_FLTR_TYPE_SRC_DEST_PORT) { - /* must to have src/dest/src+dest port as part of filter - * criteria - */ - if ((!(tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT)) && - (!(tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT))) - return -EOPNOTSUPP; - - if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) && - (!(tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT))) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from SRC+DEST_PORT to DEST_PORT\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_DEST_PORT; - } 
else if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) && - (!(tc_fltr->flags & - ICE_TC_FLWR_FIELD_DEST_L4_PORT))) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from SRC+DEST_PORT to SRC_PORT\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_PORT; - } - } else if (fltr_type == ICE_CHNL_FLTR_TYPE_TENANT_ID) { - /* Now only allow filters which has VNI */ - if (!(tc_fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID)) - return -EOPNOTSUPP; - } else if (fltr_type == ICE_CHNL_FLTR_TYPE_SRC_DEST_IPV4) { - /* must to have src/dest/src+dest IPv4 addr as part of filter - * criteria - */ - if ((!(tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV4)) && - (!(tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV4))) - return -EOPNOTSUPP; - - if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV4) && - (!(tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV4))) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from SRC+DEST IPv4 addr to DEST IPv4 addr\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_DEST_IPV4; - } else if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV4) && - (!(tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV4))) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from SRC+DEST IPv4 to SRC IPv4 addr\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_IPV4; - } - } else if (fltr_type == ICE_CHNL_FLTR_TYPE_DEST_IPV4) { - if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV4) && - (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV4)) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from DEST IPv4 addr to SRC + DEST IPv4 addr\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_DEST_IPV4; - } else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV4) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from DEST IPv4 addr to SRC IPv4 addr\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_IPV4; - } - } else if (fltr_type == ICE_CHNL_FLTR_TYPE_SRC_IPV4) { - if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV4) && - (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV4)) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from SRC IPv4 addr to SRC + DEST IPv4 addr\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_DEST_IPV4; - } else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV4) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from SRC IPv4 addr to DEST IPv4 addr\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_DEST_IPV4; - } - } else if (fltr_type == ICE_CHNL_FLTR_TYPE_SRC_DEST_IPV6) { - /* must to have src/dest/src+dest IPv6 addr as part of filter - * criteria - */ - if ((!(tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6)) && - (!(tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6))) - return -EOPNOTSUPP; - - if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6) && - (!(tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6))) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from SRC+DEST IPv6 addr to DEST IPv6 addr\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_DEST_IPV6; - } else if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6) && - (!(tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6))) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from SRC+DEST IPv6 to SRC IPv6 addr\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_IPV6; - } - } else if (fltr_type == ICE_CHNL_FLTR_TYPE_DEST_IPV6) { - if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6) && - (tc_fltr->flags & 
ICE_TC_FLWR_FIELD_SRC_IPV6)) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from DEST IPv6 addr to SRC + DEST IPv6 addr\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_DEST_IPV6; - } else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from DEST IPv6 addr to SRC IPv6 addr\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_IPV6; - } - } else if (fltr_type == ICE_CHNL_FLTR_TYPE_SRC_IPV6) { - if ((tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6) && - (tc_fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6)) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from SRC IPv6 addr to SRC + DEST IPv6 addr\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_SRC_DEST_IPV6; - } else if (tc_fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6) { - dev_dbg(dev, - "Changing filter type for action (tc_class %d) from SRC IPv6 addr to DEST IPv6 addr\n", - tc_fltr->action.tc_class); - fltr_type = ICE_CHNL_FLTR_TYPE_DEST_IPV6; - } - } else { - return -EINVAL; /* unsupported filter type */ - } - - /* return the selected fltr_type */ - *final_fltr_type = fltr_type; - - return 0; -} - /** * ice_determine_gtp_tun_type - determine TUN type based on user params * @pf: Pointer to PF @@ -454,9 +121,30 @@ ice_determine_gtp_tun_type(struct ice_pf *pf, u16 l4_proto, u32 flags, return true; } +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +/** + * ice_is_tunnel_fltr - is this a tunnel filter + * @f: Pointer to tc-flower filter + * + * This function should be called only after tunnel_type + * of the filter is set by calling ice_tc_tun_parse() + */ +static bool ice_is_tunnel_fltr(struct ice_tc_flower_fltr *f) +{ + return (f->tunnel_type == TNL_VXLAN || + f->tunnel_type == TNL_GENEVE || + f->tunnel_type == TNL_GRETAP || +#ifdef HAVE_GTP_SUPPORT + f->tunnel_type == TNL_GTPU || + f->tunnel_type == TNL_GTPC || +#endif /* HAVE_GTP_SUPPORT */ + f->tunnel_type == TNL_GTP); +} +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ + /** * ice_tc_count_lkups - determine lookup count for switch filter - * @flags: tc-flower flags + * @flags: TC-flower flags * @headers: Pointer to TC flower filter header structure * @fltr: Pointer to outer TC filter structure * @@ -468,23 +156,35 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, { int lkups_cnt = 0; - if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) + if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) lkups_cnt++; - /* is Tunnel ID specified */ - if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) { - /* For ADQ filter, outer DMAC gets added implictly */ - if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) - lkups_cnt++; - /* Copy outer L4 port for non-GTP tunnel */ - if (fltr->tunnel_type != TNL_GTP) { - if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) - if (headers->l3_key.ip_proto == IPPROTO_UDP) - lkups_cnt++; - } - /* due to tunnel */ + /* For ADQ filter, outer DMAC gets added implicitly */ + if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) + lkups_cnt++; + +#ifdef HAVE_GTP_SUPPORT + if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS) + lkups_cnt++; + +#endif /* HAVE_GTP_SUPPORT */ + if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | + ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 | + ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 | + ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) + lkups_cnt++; + +#ifdef HAVE_FLOW_DISSECTOR_KEY_ENC_IP + if (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS | + ICE_TC_FLWR_FIELD_ENC_IP_TTL)) + lkups_cnt++; + +#endif /* HAVE_FLOW_DISSECTOR_KEY_ENC_IP */ + if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) + 
lkups_cnt++; + + if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) lkups_cnt++; - } /* is MAC fields specified? */ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC)) @@ -494,14 +194,28 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, if (flags & ICE_TC_FLWR_FIELD_VLAN) lkups_cnt++; - /* is IPv[4|6] fields specified? */ - if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4)) +#ifdef HAVE_FLOW_DISSECTOR_KEY_CVLAN + /* is CVLAN specified? */ + if (flags & ICE_TC_FLWR_FIELD_CVLAN) lkups_cnt++; - else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 | - ICE_TC_FLWR_FIELD_SRC_IPV6)) +#endif /* HAVE_FLOW_DISSECTOR_KEY_CVLAN */ + + /* are PPPoE options specified? */ + if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID | + ICE_TC_FLWR_FIELD_PPP_PROTO)) lkups_cnt++; - /* is L4 (TCP/UDP/any other L4 protocol fields specified? */ + /* are IPv[4|6] fields specified? */ + if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 | + ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6)) + lkups_cnt++; + +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP + if (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL)) + lkups_cnt++; +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ + + /* is L4 (TCP/UDP/any other L4 protocol fields) specified? */ if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT | ICE_TC_FLWR_FIELD_SRC_L4_PORT)) lkups_cnt++; @@ -509,17 +223,257 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, return lkups_cnt; } +static enum ice_protocol_type ice_proto_type_from_mac(bool inner) +{ + return inner ? ICE_MAC_IL : ICE_MAC_OFOS; +} + +static enum ice_protocol_type ice_proto_type_from_etype(bool inner) +{ + return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL; +} + +static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner) +{ + return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS; +} + +static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner) +{ + return inner ? 
ICE_IPV6_IL : ICE_IPV6_OFOS; +} + +static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto) +{ + switch (ip_proto) { + case IPPROTO_TCP: + return ICE_TCP_IL; + case IPPROTO_UDP: + return ICE_UDP_ILOS; + } + + return 0; +} + +static enum ice_protocol_type +ice_proto_type_from_tunnel(enum ice_tunnel_type type) +{ + switch (type) { + case TNL_VXLAN: + return ICE_VXLAN; + case TNL_GENEVE: + return ICE_GENEVE; + case TNL_GRETAP: + return ICE_NVGRE; +#ifdef HAVE_GTP_SUPPORT + case TNL_GTPU: + /* NO_PAY profiles will not work with GTP-U */ + return ICE_GTP; + case TNL_GTPC: + return ICE_GTP_NO_PAY; +#endif /* HAVE_GTP_SUPPORT */ + default: + return 0; + } +} + +static enum ice_sw_tunnel_type +ice_sw_type_from_tunnel(enum ice_tunnel_type type) +{ + switch (type) { + case TNL_VXLAN: + return ICE_SW_TUN_VXLAN; + case TNL_GENEVE: + return ICE_SW_TUN_GENEVE; + case TNL_GRETAP: + return ICE_SW_TUN_NVGRE; +#ifdef HAVE_GTP_SUPPORT + case TNL_GTPU: + return ICE_SW_TUN_GTPU; + case TNL_GTPC: + return ICE_SW_TUN_GTPC; +#endif /* HAVE_GTP_SUPPORT */ + default: + return ICE_NON_TUN; + } +} + +static int +ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, + struct ice_adv_lkup_elem *list) +{ + struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers; + int i = 0; + + if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) { + u32 tenant_id; + + list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type); + switch (fltr->tunnel_type) { + case TNL_VXLAN: + case TNL_GENEVE: + tenant_id = be32_to_cpu(fltr->tenant_id) << 8; + list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id); + memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4); + i++; + break; + case TNL_GRETAP: + list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id; + memcpy(&list[i].m_u.nvgre_hdr.tni_flow, + "\xff\xff\xff\xff", 4); + i++; + break; +#ifdef HAVE_GTP_SUPPORT + case TNL_GTPC: + case TNL_GTPU: + list[i].h_u.gtp_hdr.teid = fltr->tenant_id; + memcpy(&list[i].m_u.gtp_hdr.teid, + "\xff\xff\xff\xff", 4); + i++; + break; +#endif /* HAVE_GTP_SUPPORT */ + default: + break; + } + } + + if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) { + list[i].type = ice_proto_type_from_mac(false); + ether_addr_copy(list[i].h_u.eth_hdr.dst_addr, + hdr->l2_key.dst_mac); + ether_addr_copy(list[i].m_u.eth_hdr.dst_addr, + hdr->l2_mask.dst_mac); + i++; + } + +#ifdef HAVE_GTP_SUPPORT + if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS && + (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) { + list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type); + + if (fltr->gtp_pdu_info_masks.pdu_type) { + list[i].h_u.gtp_hdr.pdu_type = + fltr->gtp_pdu_info_keys.pdu_type << 4; + memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1); + } + + if (fltr->gtp_pdu_info_masks.qfi) { + list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi; + memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1); + } + + i++; + } +#endif /* HAVE_GTP_SUPPORT */ + + if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | + ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) { + list[i].type = ice_proto_type_from_ipv4(false); + + if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) { + list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4; + list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4; + } + if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) { + list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4; + list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4; + } + i++; + } + + if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 | + ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) { + list[i].type = ice_proto_type_from_ipv6(false); + + if (flags & 
ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) { + memcpy(&list[i].h_u.ipv6_hdr.src_addr, + &hdr->l3_key.src_ipv6_addr, + sizeof(hdr->l3_key.src_ipv6_addr)); + memcpy(&list[i].m_u.ipv6_hdr.src_addr, + &hdr->l3_mask.src_ipv6_addr, + sizeof(hdr->l3_mask.src_ipv6_addr)); + } + if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) { + memcpy(&list[i].h_u.ipv6_hdr.dst_addr, + &hdr->l3_key.dst_ipv6_addr, + sizeof(hdr->l3_key.dst_ipv6_addr)); + memcpy(&list[i].m_u.ipv6_hdr.dst_addr, + &hdr->l3_mask.dst_ipv6_addr, + sizeof(hdr->l3_mask.dst_ipv6_addr)); + } + i++; + } + +#ifdef HAVE_FLOW_DISSECTOR_KEY_ENC_IP + if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IP) && + (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS | + ICE_TC_FLWR_FIELD_ENC_IP_TTL))) { + list[i].type = ice_proto_type_from_ipv4(false); + + if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) { + list[i].h_u.ipv4_hdr.tos = hdr->l3_key.tos; + list[i].m_u.ipv4_hdr.tos = hdr->l3_mask.tos; + } + + if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) { + list[i].h_u.ipv4_hdr.time_to_live = hdr->l3_key.ttl; + list[i].m_u.ipv4_hdr.time_to_live = hdr->l3_mask.ttl; + } + + i++; + } + + if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IPV6) && + (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS | + ICE_TC_FLWR_FIELD_ENC_IP_TTL))) { + struct ice_ipv6_hdr *hdr_h, *hdr_m; + + hdr_h = &list[i].h_u.ipv6_hdr; + hdr_m = &list[i].m_u.ipv6_hdr; + list[i].type = ice_proto_type_from_ipv6(false); + + if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) { + hdr_h->be_ver_tc_flow = + htonl((hdr->l3_key.tos << + ICE_IPV6_HDR_TC_OFFSET) & + ICE_IPV6_HDR_TC_MASK); + hdr_m->be_ver_tc_flow = + htonl((hdr->l3_mask.tos << + ICE_IPV6_HDR_TC_OFFSET) & + ICE_IPV6_HDR_TC_MASK); + } + + if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) { + hdr_h->hop_limit = hdr->l3_key.ttl; + hdr_m->hop_limit = hdr->l3_mask.ttl; + } + + i++; + } + +#endif /* HAVE_FLOW_DISSECTOR_KEY_ENC_IP */ + if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) && + hdr->l3_key.ip_proto == IPPROTO_UDP) { + list[i].type = ICE_UDP_OF; + list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port; + list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port; + i++; + } + + return i; +} + /** - * ice_tc_fill_rules - fill filter rules based on tc fltr - * @hw: pointer to hw structure - * @flags: tc flower field flags - * @tc_fltr: pointer to tc flower filter + * ice_tc_fill_rules - fill filter rules based on TC fltr + * @hw: pointer to HW structure + * @flags: TC flower field flags + * @tc_fltr: pointer to TC flower filter * @list: list of advance rule elements * @rule_info: pointer to information about rule * @l4_proto: pointer to information such as L4 proto type * - * Fill ice_adv_lkup_elem list based on tc flower flags and - * tc flower headers. This list should be used to add + * Fill ice_adv_lkup_elem list based on TC flower flags and + * TC flower headers. This list should be used to add * advance filter in hardware. 
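[Reference sketch, not part of the patch] ice_tc_fill_tunnel_outer() above packs the 24-bit VXLAN/GENEVE VNI into the upper three bytes of a 32-bit field before applying the "\xff\xff\xff\x00" mask. A minimal user-space illustration of that packing, assuming htonl/ntohl stand in for cpu_to_be32/be32_to_cpu:

    #include <stdint.h>
    #include <arpa/inet.h>

    /* Pack a 24-bit VNI (given in network byte order) so that on the wire
     * it occupies the first three bytes and the fourth byte is ignored,
     * matching the \xff\xff\xff\x00 mask used in the lookup element above.
     */
    static uint32_t pack_vni(uint32_t vni_be)
    {
            return htonl(ntohl(vni_be) << 8);
    }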
*/ static int @@ -530,135 +484,96 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, u16 *l4_proto) { struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers; + bool inner = false; int i = 0; + rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type); + if (tc_fltr->tunnel_type != TNL_LAST) { + i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list); + + headers = &tc_fltr->inner_headers; + inner = true; + } + if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) { - list[i].type = ICE_ETYPE_OL; + list[i].type = ice_proto_type_from_etype(inner); list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto; list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto; i++; } - /* copy L2 (MAC) fields, Outer UDP (in case of tunnel) port info */ - if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) { - u32 tenant_id; + if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | + ICE_TC_FLWR_FIELD_SRC_MAC)) { + struct ice_tc_l2_hdr *l2_key, *l2_mask; - /* copy L2 (MAC) fields if specified, For tunnel outer DMAC - * is needed and supported and is part of outer_headers.dst_mac - * For VxLAN tunnel, supported ADQ filter config is: - * - Outer dest MAC + VNI + Inner IPv4 + Inner L4 ports - */ - if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) { - list[i].type = ICE_MAC_OFOS; + l2_key = &headers->l2_key; + l2_mask = &headers->l2_mask; + + list[i].type = ice_proto_type_from_mac(inner); + if (flags & ICE_TC_FLWR_FIELD_DST_MAC) { ether_addr_copy(list[i].h_u.eth_hdr.dst_addr, - headers->l2_key.dst_mac); + l2_key->dst_mac); ether_addr_copy(list[i].m_u.eth_hdr.dst_addr, - headers->l2_mask.dst_mac); - i++; + l2_mask->dst_mac); } - /* copy outer UDP (enc_dst_port) only for non-GTP tunnel */ - if (tc_fltr->tunnel_type != TNL_GTP) { - if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) && - headers->l3_key.ip_proto == IPPROTO_UDP) { - list[i].type = ICE_UDP_OF; - list[i].h_u.l4_hdr.dst_port = - headers->l4_key.dst_port; - list[i].m_u.l4_hdr.dst_port = - headers->l4_mask.dst_port; - i++; - } + if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) { + ether_addr_copy(list[i].h_u.eth_hdr.src_addr, + l2_key->src_mac); + ether_addr_copy(list[i].m_u.eth_hdr.src_addr, + l2_mask->src_mac); } - - /* setup encap info in list elements such as VNI/encap key-id, - * mask, type of tunnel - */ - if (tc_fltr->tunnel_type == TNL_VXLAN) - list[i].type = ICE_VXLAN; - else if (tc_fltr->tunnel_type == TNL_GENEVE) - list[i].type = ICE_GENEVE; - else if (tc_fltr->tunnel_type == TNL_GTP) - list[i].type = ICE_GTP; - - if (tc_fltr->tunnel_type == TNL_VXLAN || - tc_fltr->tunnel_type == TNL_GENEVE) { - tenant_id = be32_to_cpu(tc_fltr->tenant_id) << 8; - list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id); - if (tenant_id) - /* 24 bit tunnel key: mask "\xff\xff\xff\x00" */ - memcpy(&list[i].m_u.tnl_hdr.vni, - "\xff\xff\xff\x00", 4); - else - memcpy(&list[i].m_u.tnl_hdr.vni, - "\x00\x00\x00\x00", 4); - } else if (tc_fltr->tunnel_type == TNL_GTP) { - tenant_id = be32_to_cpu(tc_fltr->tenant_id); - list[i].h_u.gtp_hdr.teid = cpu_to_be32(tenant_id); - if (tenant_id) - /* 32 bit tunnel key: mask "\xff\xff\xff\xff" */ - memcpy(&list[i].m_u.gtp_hdr.teid, - "\xff\xff\xff\xff", 4); - else - memcpy(&list[i].m_u.gtp_hdr.teid, - "\x00\x00\x00x00", 4); - } - /* advance list index */ i++; - - /* now access values from inner_headers such as inner MAC (if - * supported), inner IPv4[6], Inner L4 ports, hence update - * "headers" to point to inner_headers - */ - headers = &tc_fltr->inner_headers; - } else { - rule_info->tun_type = ICE_NON_TUN; - /* copy L2 (MAC) fields, for non-tunnel case */ - if (flags 
& (ICE_TC_FLWR_FIELD_DST_MAC | - ICE_TC_FLWR_FIELD_SRC_MAC)) { - struct ice_tc_l2_hdr *l2_key, *l2_mask; - - l2_key = &headers->l2_key; - l2_mask = &headers->l2_mask; - - list[i].type = ICE_MAC_OFOS; - if (flags & ICE_TC_FLWR_FIELD_DST_MAC) { - ether_addr_copy(list[i].h_u.eth_hdr.dst_addr, - l2_key->dst_mac); - ether_addr_copy(list[i].m_u.eth_hdr.dst_addr, - l2_mask->dst_mac); - } - if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) { - ether_addr_copy(list[i].h_u.eth_hdr.src_addr, - l2_key->src_mac); - ether_addr_copy(list[i].m_u.eth_hdr.src_addr, - l2_mask->src_mac); - } - i++; - } } /* copy VLAN info */ if (flags & ICE_TC_FLWR_FIELD_VLAN) { list[i].type = ICE_VLAN_OFOS; +#ifdef HAVE_FLOW_DISSECTOR_KEY_CVLAN + if (flags & ICE_TC_FLWR_FIELD_CVLAN) + list[i].type = ICE_VLAN_EX; +#endif /* HAVE_FLOW_DISSECTOR_KEY_CVLAN */ list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id; list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF); i++; } +#ifdef HAVE_FLOW_DISSECTOR_KEY_CVLAN + if (flags & ICE_TC_FLWR_FIELD_CVLAN) { + list[i].type = ICE_VLAN_IN; + list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id; + list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF); + i++; + } +#endif /* HAVE_FLOW_DISSECTOR_KEY_CVLAN */ + + if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID | + ICE_TC_FLWR_FIELD_PPP_PROTO)) { + struct ice_pppoe_hdr *vals, *masks; + + vals = &list[i].h_u.pppoe_hdr; + masks = &list[i].m_u.pppoe_hdr; + + list[i].type = ICE_PPPOE; + + if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) { + vals->session_id = headers->pppoe_hdr.session_id; + masks->session_id = cpu_to_be16(0xFFFF); + } + + if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) { + vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto; + masks->ppp_prot_id = cpu_to_be16(0xFFFF); + } + + i++; + } /* copy L3 (IPv[4|6]: src, dest) address */ if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4)) { struct ice_tc_l3_hdr *l3_key, *l3_mask; - /* For encap, Outer L3 and L4 based are not supported, - * hence if user specified L3, L4 fields, they are treated - * as inner L3 and L4 respectivelt - */ - if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) - list[i].type = ICE_IPV4_IL; - else - list[i].type = ICE_IPV4_OFOS; - + list[i].type = ice_proto_type_from_ipv4(inner); l3_key = &headers->l3_key; l3_mask = &headers->l3_mask; if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) { @@ -675,10 +590,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask; struct ice_tc_l3_hdr *l3_key, *l3_mask; - if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) - list[i].type = ICE_IPV6_IL; - else - list[i].type = ICE_IPV6_OFOS; + list[i].type = ice_proto_type_from_ipv6(inner); ipv6_hdr = &list[i].h_u.ipv6_hdr; ipv6_mask = &list[i].m_u.ipv6_hdr; l3_key = &headers->l3_key; @@ -699,32 +611,64 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, i++; } +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP + if (headers->l2_key.n_proto == htons(ETH_P_IP) && + (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) { + list[i].type = ice_proto_type_from_ipv4(inner); + + if (flags & ICE_TC_FLWR_FIELD_IP_TOS) { + list[i].h_u.ipv4_hdr.tos = headers->l3_key.tos; + list[i].m_u.ipv4_hdr.tos = headers->l3_mask.tos; + } + + if (flags & ICE_TC_FLWR_FIELD_IP_TTL) { + list[i].h_u.ipv4_hdr.time_to_live = + headers->l3_key.ttl; + list[i].m_u.ipv4_hdr.time_to_live = + headers->l3_mask.ttl; + } + + i++; + } + + if (headers->l2_key.n_proto == htons(ETH_P_IPV6) && + (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) { + struct ice_ipv6_hdr *hdr_h, *hdr_m; + + hdr_h = &list[i].h_u.ipv6_hdr; + hdr_m = 
&list[i].m_u.ipv6_hdr; + list[i].type = ice_proto_type_from_ipv6(inner); + + if (flags & ICE_TC_FLWR_FIELD_IP_TOS) { + hdr_h->be_ver_tc_flow = + htonl((headers->l3_key.tos << + ICE_IPV6_HDR_TC_OFFSET) & + ICE_IPV6_HDR_TC_MASK); + hdr_m->be_ver_tc_flow = + htonl((headers->l3_mask.tos << + ICE_IPV6_HDR_TC_OFFSET) & + ICE_IPV6_HDR_TC_MASK); + } + + if (flags & ICE_TC_FLWR_FIELD_IP_TTL) { + hdr_h->hop_limit = headers->l3_key.ttl; + hdr_m->hop_limit = headers->l3_mask.ttl; + } + + i++; + } + +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ /* copy L4 (src, dest) port */ if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT | ICE_TC_FLWR_FIELD_SRC_L4_PORT)) { struct ice_tc_l4_hdr *l4_key, *l4_mask; - u16 dst_port; + list[i].type = + ice_proto_type_from_l4_port(headers->l3_key.ip_proto); l4_key = &headers->l4_key; l4_mask = &headers->l4_mask; - dst_port = be16_to_cpu(l4_key->dst_port); - if (headers->l3_key.ip_proto == IPPROTO_TCP) { - list[i].type = ICE_TCP_IL; - /* detected L4 proto is TCP */ - if (l4_proto) - *l4_proto = IPPROTO_TCP; - } else if (headers->l3_key.ip_proto == IPPROTO_UDP) { - /* Check if UDP dst port is known as a tunnel port */ - if (ice_tunnel_port_in_use(hw, dst_port, NULL)) { - list[i].type = ICE_UDP_OF; - rule_info->tun_type = ICE_SW_TUN_VXLAN; - } else { - list[i].type = ICE_UDP_ILOS; - } - /* detected L4 proto is UDP */ - if (l4_proto) - *l4_proto = IPPROTO_UDP; - } + if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) { list[i].h_u.l4_hdr.dst_port = l4_key->dst_port; list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port; @@ -739,11 +683,164 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, return i; } -#ifdef HAVE_TC_FLOW_RULE_INFRASTRUCTURE -static int ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr, - struct flow_action_entry *act) +#if defined(HAVE_TCF_MIRRED_DEV) || defined(HAVE_TC_FLOW_RULE_INFRASTRUCTURE) +/** + * ice_is_tnl_gtp - detect if tunnel type is GTP or not + * @tunnel_dev: ptr to tunnel device + * @rule: ptr to flow_rule + * + * If curr_tnl_type is TNL_LAST and "flow_rule" is non-NULL, then + * check if enc_dst_port is well known GTP port (2152) + * if so - return true (indicating that tunnel type is GTP), otherwise false. + */ +static bool +ice_is_tnl_gtp(struct net_device *tunnel_dev, struct flow_rule *rule) { - struct ice_repr *repr; + /* if flow_rule is non-NULL, proceed with detecting possibility + * of GTP tunnel. Unlike VXLAN and GENEVE, there is no such API + * like netif_is_gtp since GTP is not natively supported in kernel + */ + if (rule && (!is_vlan_dev(tunnel_dev))) { + struct flow_match_ports match; + u16 enc_dst_port; + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) + return false; + + /* get ENC_PORTS info */ + flow_rule_match_enc_ports(rule, &match); + enc_dst_port = be16_to_cpu(match.key->dst); + + /* Outer UDP port is GTP well known port, + * if 'enc_dst_port' matched with GTP well known port, + * return true from this function. 
+ */ + if (enc_dst_port != ICE_GTPU_PORT) + return false; + + /* all checks passed including outer UDP port to be qualified + * for GTP tunnel + */ + return true; + } + return false; +} + +#ifdef HAVE_FLOW_DISSECTOR_KEY_PPPOE +/** + * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter + * @match: Pointer to flow match structure + * @fltr: Pointer to filter structure + * @headers: Pointer to outer header fields + * @returns PPP protocol used in filter (ppp_ses or ppp_disc) + */ +static u16 +ice_tc_set_pppoe(struct flow_match_pppoe *match, + struct ice_tc_flower_fltr *fltr, + struct ice_tc_flower_lyr_2_4_hdrs *headers) +{ + if (match->mask->session_id) { + fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID; + headers->pppoe_hdr.session_id = match->key->session_id; + } + + if (match->mask->ppp_proto) { + fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO; + headers->pppoe_hdr.ppp_proto = match->key->ppp_proto; + } + + return be16_to_cpu(match->key->type); +} +#endif /* HAVE_FLOW_DISSECTOR_KEY_PPPOE */ + +/** + * ice_tc_tun_get_type - get the tunnel type + * @tunnel_dev: ptr to tunnel device + * @rule: ptr to flow_rule + * + * This function detects appropriate tunnel_type if specified device is + * tunnel device such as vxlan/geneve othertwise it tries to detect + * tunnel type based on outer GTP port (2152) + */ +int +ice_tc_tun_get_type(struct net_device *tunnel_dev, struct flow_rule *rule) +{ +#ifdef HAVE_VXLAN_TYPE +#if IS_ENABLED(CONFIG_VXLAN) + if (netif_is_vxlan(tunnel_dev)) + return TNL_VXLAN; +#endif +#endif /* HAVE_VXLAN_TYPE */ +#ifdef HAVE_GENEVE_TYPE +#if IS_ENABLED(CONFIG_GENEVE) + if (netif_is_geneve(tunnel_dev)) + return TNL_GENEVE; +#endif +#endif /* HAVE_GENEVE_TYPE */ +#ifdef HAVE_GRETAP_TYPE + if (netif_is_gretap(tunnel_dev) || + netif_is_ip6gretap(tunnel_dev)) + return TNL_GRETAP; +#endif /* HAVE_GRETAP_TYPE */ + +#ifdef HAVE_GTP_SUPPORT + /* Assume GTP-U by default in case of GTP netdev. + * GTP-C may be selected later, based on enc_dst_port. 
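[Reference sketch, not part of the patch] The GTP detection above keys off the outer UDP destination port because there is no netif_is_gtp()-style check for every kernel. A small stand-alone illustration of that classification, assuming the well-known ports 2152 (GTP-U, user plane) and 2123 (GTP-C, control plane) that ICE_GTPU_PORT/ICE_GTPC_PORT refer to:

    #include <stdbool.h>
    #include <stdint.h>

    #define GTPU_WELL_KNOWN_PORT 2152 /* GTP-U, user plane */
    #define GTPC_WELL_KNOWN_PORT 2123 /* GTP-C, control plane */

    /* With no tunnel netdev type to rely on, the outer UDP destination
     * port decides whether the encapsulation is treated as GTP-U or GTP-C.
     */
    static bool classify_gtp_port(uint16_t dst_port, bool *is_ctrl_plane)
    {
            if (dst_port == GTPU_WELL_KNOWN_PORT) {
                    *is_ctrl_plane = false;
                    return true;
            }
            if (dst_port == GTPC_WELL_KNOWN_PORT) {
                    *is_ctrl_plane = true;
                    return true;
            }
            return false;
    }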
+ */ + if (netif_is_gtp(tunnel_dev)) + return TNL_GTPU; +#endif /* HAVE_GTP_SUPPORT */ + + /* detect possibility of GTP tunnel type based on input */ + if (ice_is_tnl_gtp(tunnel_dev, rule)) + return TNL_GTP; + + return TNL_LAST; +} + +static bool +ice_is_tunnel_supported(struct net_device *dev, struct flow_rule *rule) +{ + return ice_tc_tun_get_type(dev, rule) != TNL_LAST; +} +#endif /* HAVE_TCF_MIRRED_DEC || HAVE_TC_FLOW_RULE_INFRASTRUCTURE */ + +#if defined(HAVE_TCF_MIRRED_DEV) || defined(HAVE_TC_FLOW_RULE_INFRASTRUCTURE) +static int +ice_tc_setup_redirect_action(struct ice_tc_flower_fltr *fltr, + struct net_device *target_dev) +{ + fltr->action.fltr_act = ICE_FWD_TO_VSI; + + if (ice_is_port_repr_netdev(target_dev)) { + struct ice_repr *repr = ice_netdev_to_repr(target_dev); + + fltr->dest_vsi = repr->src_vsi; + fltr->direction = ICE_ESWITCH_FLTR_INGRESS; + } else if (netif_is_ice(target_dev) || + ice_is_tunnel_supported(target_dev, NULL)) { + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; + } else { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode"); + return -EINVAL; + } + + return 0; +} +#endif + +#ifdef HAVE_TC_FLOW_RULE_INFRASTRUCTURE +static int +ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr, + struct flow_action_entry *act) +#else +static int +ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr, + struct tc_action *tc_act) +#endif +{ +#ifdef HAVE_TC_FLOW_RULE_INFRASTRUCTURE + int err; switch (act->id) { case FLOW_ACTION_DROP: @@ -751,35 +848,35 @@ static int ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr, break; case FLOW_ACTION_REDIRECT: - fltr->action.fltr_act = ICE_FWD_TO_VSI; - - if (ice_is_port_repr_netdev(act->dev)) { - repr = ice_netdev_to_repr(act->dev); - - fltr->dest_vsi = repr->src_vsi; - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; - } else if (netif_is_ice(act->dev)) { - struct ice_netdev_priv *np = netdev_priv(act->dev); - - fltr->dest_vsi = np->vsi; - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; - } else { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unsupported netdevice in switchdev mode"); - return -EINVAL; - } + err = ice_tc_setup_redirect_action(fltr, act->dev); + if (err) + return err; break; default: - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unsupported action in switchdev mode"); + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode"); return -EINVAL; } return 0; +#elif defined(HAVE_TCF_MIRRED_DEV) + int err; + + if (is_tcf_gact_shot(tc_act)) { + fltr->action.fltr_act = ICE_DROP_PACKET; + } else if (is_tcf_mirred_egress_redirect(tc_act)) { + err = ice_tc_setup_redirect_action(fltr, + tcf_mirred_dev(tc_act)); + if (err) + return err; + } + + return 0; +#else + return -EINVAL; +#endif } -#endif /* HAVE_TC_FLOW_RULE_INFRASTRUCTURE */ static int ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) @@ -787,21 +884,14 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; struct ice_adv_rule_info rule_info = { 0 }; struct ice_rule_query_data rule_added; - struct ice_adv_lkup_elem *list; struct ice_hw *hw = &vsi->back->hw; + struct ice_adv_lkup_elem *list; u32 flags = fltr->flags; - enum ice_status status; int lkups_cnt; - int ret = 0; - int i; + int i, ret; - if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 | - ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | - ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 | - ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 | - ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) { - 
NL_SET_ERR_MSG_MOD(fltr->extack, - "Unsupported encap field(s)"); + if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)"); return -EOPNOTSUPP; } @@ -816,8 +906,32 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) goto exit; } + if (fltr->tunnel_type == TNL_VXLAN) + rule_info.tun_type = ICE_SW_TUN_VXLAN; + else if (fltr->tunnel_type == TNL_GENEVE) + rule_info.tun_type = ICE_SW_TUN_GENEVE; + else if (fltr->tunnel_type == TNL_GRETAP) + rule_info.tun_type = ICE_SW_TUN_NVGRE; +#ifdef HAVE_GTP_SUPPORT + else if (fltr->tunnel_type == TNL_GTPU) + rule_info.tun_type = ICE_SW_TUN_GTPU; + else if (fltr->tunnel_type == TNL_GTPC) + rule_info.tun_type = ICE_SW_TUN_GTPC; +#endif /* HAVE_GTP_SUPPORT */ + + /* egress traffic is always redirect to uplink */ + if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS) + fltr->dest_vsi = vsi->back->switchdev.uplink_vsi; + rule_info.sw_act.fltr_act = fltr->action.fltr_act; - rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx; + if (fltr->action.fltr_act != ICE_DROP_PACKET) + rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx; + /* For now, making priority to be highest, and it also becomes + * the priority for recipe which will get created as a result of + * new extraction sequence based on input set. + * Priority '7' is max val for switch recipe, higher the number + * results into order of switch rule evaluation. + */ rule_info.priority = 7; if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) { @@ -828,21 +942,22 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_info.sw_act.flag |= ICE_FLTR_TX; rule_info.sw_act.src = vsi->idx; rule_info.rx = false; + rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE; + rule_info.flags_info.act_valid = true; } + rule_info.add_dir_lkup = false; + /* specify the cookie as filter_rule_id */ rule_info.fltr_rule_id = fltr->cookie; - status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); - if (status == ICE_ERR_ALREADY_EXISTS) { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unable to add filter because it already exist"); + ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); + if (ret == -EEXIST) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist"); ret = -EINVAL; goto exit; - } else if (status) { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unable to add filter due to error"); - ret = -EIO; + } else if (ret) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error"); goto exit; } @@ -851,18 +966,216 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) */ fltr->rid = rule_added.rid; fltr->rule_id = rule_added.rule_id; - - if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { - if (ice_fltr_update_flags(vsi, fltr->rule_id, fltr->rid, - ICE_SINGLE_ACT_LAN_ENABLE)) - ice_rem_adv_rule_by_id(hw, &rule_added); - } + fltr->dest_vsi_handle = rule_added.vsi_handle; exit: kfree(list); return ret; } +/** + * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue) + * @vsi: Pointer to VSI + * @tc_fltr: Pointer to tc_flower_filter + * + * Locate the VSI using specified "queue" (which is part of tc_fltr). 
When ADQ + * is not enabled, always return input VSI, otherwise locate corresponding + * VSI based on per channel "offset" and "qcount" + */ +static struct ice_vsi * +ice_locate_vsi_using_queue(struct ice_vsi *vsi, + struct ice_tc_flower_fltr *tc_fltr) +{ +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO + int num_tc, tc; + int queue; +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ + + /* verify action is forward to queue */ + if (tc_fltr->action.fltr_act != ICE_FWD_TO_Q) + return NULL; + + /* if ADQ is not active, passed VSI is the candidate VSI */ + if (!ice_is_adq_active(vsi->back)) + return vsi; + +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO + /* now locate the VSI (it could still be main PF VSI or CHNL_VSI + * depending upon "queue number") + */ + num_tc = vsi->mqprio_qopt.qopt.num_tc; + queue = (int)tc_fltr->action.fwd.q.queue; + + for (tc = 0; tc < num_tc; tc++) { + int qcount = vsi->mqprio_qopt.qopt.count[tc]; + int offset = vsi->mqprio_qopt.qopt.offset[tc]; + + if (queue >= offset && (queue < offset + qcount)) { + /* for non-ADQ TCs, passed VSI is the candidate VSI */ + if (tc < ICE_CHNL_START_TC) + return vsi; + else + return vsi->tc_map_vsi[tc]; + } + } +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ + return NULL; +} + +static struct ice_ring * +ice_locate_rx_ring_using_queue(struct ice_vsi *vsi, + struct ice_tc_flower_fltr *tc_fltr) +{ + u32 queue = tc_fltr->action.fwd.q.queue; + struct ice_pf *pf = vsi->back; + struct device *dev; + struct ice_vf *vf; + int tc; + + dev = ice_pf_to_dev(pf); + vf = vsi->vf; + + if (vsi->type != ICE_VSI_VF || vf->num_tc <= ICE_VF_CHNL_START_TC) + return (queue < vsi->num_rxq) ? vsi->rx_rings[queue] : NULL; + + /* now locate the corresponding Rx rings */ + for (tc = 0; tc < vf->num_tc; tc++) { + u16 num_qps, offset; + + offset = vf->ch[tc].offset; + num_qps = vf->ch[tc].num_qps; + + if (queue >= offset && + (queue < (offset + num_qps))) { + struct ice_vsi *tc_vsi; + + tc_vsi = pf->vsi[vf->ch[tc].vsi_idx]; + if (!tc_vsi) { + dev_err(dev, "VF %d: VF ADQ VSI is not valid\n", + vf->vf_id); + return NULL; + } + if ((queue - offset) >= vsi->num_rxq) { + dev_err(dev, "VF %d: forward to queue (%u) not in range, offset:%u, num_qps %u, num_rxq %u\n", + vf->vf_id, queue, offset, + num_qps, tc_vsi->num_rxq); + return NULL; + } + return tc_vsi->rx_rings[queue - offset]; + } + } + return NULL; +} + +/** + * ice_tc_forward_action - Determine destination VSI and queue for the action + * @vsi: Pointer to VSI + * @tc_fltr: Pointer to TC flower filter structure + * @rx_ring: Pointer to ring ptr + * @dest_vsi: Pointer to VSI ptr + * + * Validates the tc forward action and determines the destination VSI and queue + * for the forward action. 
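[Reference sketch, not part of the patch] Both locate helpers above resolve a flat queue index by walking per-TC (offset, count) ranges. A minimal stand-alone version of that range check, with hypothetical arrays in place of the driver's mqprio/VF channel structures:

    /* Map a flat queue index to the traffic class that owns it. Each TC
     * owns the contiguous range [offset[tc], offset[tc] + count[tc]).
     * Returns the TC index, or -1 if the queue falls outside every range.
     */
    static int queue_to_tc(unsigned int queue, const unsigned int *offset,
                           const unsigned int *count, int num_tc)
    {
            int tc;

            for (tc = 0; tc < num_tc; tc++)
                    if (queue >= offset[tc] && queue < offset[tc] + count[tc])
                            return tc;

            return -1;
    }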
+ */ +static int +ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr, + struct ice_ring **rx_ring, struct ice_vsi **dest_vsi) +{ + struct ice_channel_vf *vf_ch = NULL; + struct ice_vsi *ch_vsi = NULL; + struct ice_pf *pf = vsi->back; + struct ice_ring *ring = NULL; + struct ice_vf *vf = NULL; + struct device *dev; + u16 tc_class = 0; + + dev = ice_pf_to_dev(pf); + *rx_ring = NULL; + *dest_vsi = NULL; + + /* validate VSI types */ + if (vsi->type == ICE_VSI_VF) { + vf = vsi->vf; + if (!vf) { + dev_err(dev, "VF is NULL for VF VSI, vsi_num %d\n", + vsi->vsi_num); + return -EINVAL; + } + if (!tc_fltr->dest_vsi) { + dev_err(dev, + "Needs valid destination VSI if main VSI type is ICE_VSI_VF\n"); + return -EINVAL; + } + if (tc_fltr->dest_vsi->type != ICE_VSI_VF) { + dev_err(dev, + "Unexpected destination VSI type when input VSI type is ICE_VSI_VF\n"); + return -EINVAL; + } + } + + /* Get the destination VSI and/or destination queue and validate them */ + tc_class = tc_fltr->action.fwd.tc.tc_class; + if (tc_class && tc_fltr->action.fltr_act == ICE_FWD_TO_VSI) { + /* Select the destination VSI */ + if (tc_class < ICE_CHNL_START_TC) { + NL_SET_ERR_MSG_MOD(tc_fltr->extack, + "Unable to add filter because of unsupported destination"); + return -EOPNOTSUPP; + } + if (vsi->type == ICE_VSI_VF) { + ch_vsi = tc_fltr->dest_vsi; + /* For VF ADQ, locate channel based on tc_class */ + vf_ch = &vf->ch[tc_class]; + if (!vf_ch) { + dev_err(dev, "Unable to add filter because TC specific param are invalid\n"); + return -EINVAL; + } + } else { + /* Locate ADQ VSI depending on hw_tc number */ + ch_vsi = vsi->tc_map_vsi[tc_class]; + } + } else if (tc_fltr->action.fltr_act == ICE_FWD_TO_Q) { + /* Locate the Rx queue using "action.fwd.q.queue" */ + ring = ice_locate_rx_ring_using_queue(vsi, tc_fltr); + if (!ring) { + dev_err(dev, + "Unable to locate Rx queue for action fwd_to_queue: %u\n", + tc_fltr->action.fwd.q.queue); + return -EINVAL; + } + /* Determine destination VSI even though forward action is + * FWD_TO_QUEUE, because QUEUE is associated with VSI + */ + if (vsi->type == ICE_VSI_VF) { + ch_vsi = tc_fltr->dest_vsi; + /* Locate channel which corresponds to TC0 */ + vf_ch = &vf->ch[0]; + if (!vf_ch) { + dev_err(dev, "Unable to add filter because TC specific param are invalid\n"); + return -EINVAL; + } + } else { + ch_vsi = ice_locate_vsi_using_queue(vsi, tc_fltr); + } + } else { + dev_err(dev, + "Unable to add filter because of unsupported action %u (supported actions: fwd to tc, fwd to queue)\n", + tc_fltr->action.fltr_act); + return -EINVAL; + } + + /* Must have valid "ch_vsi" (it could be main VSI or ADQ VSI */ + if (!ch_vsi) { + dev_err(dev, + "Unable to add filter because specified destination VSI doesn't exist\n"); + return -EINVAL; + } + + *rx_ring = ring; + *dest_vsi = ch_vsi; + return 0; +} + /** * ice_add_tc_flower_adv_fltr - add appropriate filter rules * @vsi: Pointer to VSI @@ -876,27 +1189,23 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) { struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers; - enum ice_channel_fltr_type fltr_type = ICE_CHNL_FLTR_TYPE_INVALID; struct ice_adv_rule_info rule_info = {0}; struct ice_rule_query_data rule_added; - struct ice_channel_vf *vf_ch = NULL; + struct ice_ring *rx_ring = NULL; struct ice_adv_lkup_elem *list; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; u32 flags = tc_fltr->flags; - enum ice_status status; struct ice_vsi *ch_vsi; struct device *dev; - struct ice_vf *vf; 
u16 lkups_cnt = 0; u16 l4_proto = 0; - int ret = 0; u16 i = 0; + int ret; dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) { - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Unable to add filter because driver is in safe mode"); + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode"); return -EOPNOTSUPP; } @@ -905,16 +1214,14 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 | ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 | ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) { - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Unsupported encap field(s)"); + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)"); return -EOPNOTSUPP; } - /* get the channel (aka ADQ VSI) */ - if (tc_fltr->dest_vsi) - ch_vsi = tc_fltr->dest_vsi; - else - ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class]; + /* validate forwarding action VSI and queue */ + ret = ice_tc_forward_action(vsi, tc_fltr, &rx_ring, &ch_vsi); + if (ret) + return ret; lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr); list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); @@ -927,6 +1234,11 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, goto exit; } + if (tc_fltr->tunnel_type == TNL_VXLAN) + rule_info.tun_type = ICE_SW_TUN_VXLAN; + else if (tc_fltr->tunnel_type == TNL_GENEVE) + rule_info.tun_type = ICE_SW_TUN_GENEVE; + /* Now determine correct TUN type of based on encap params */ if ((flags & ICE_TC_FLWR_FIELD_TENANT_ID) && tc_fltr->tunnel_type == TNL_GTP) { @@ -934,112 +1246,60 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, &rule_info)) { if (vsi->type == ICE_VSI_VF) dev_err(dev, "Unable to add filter because could not determine tun type, VSI %u, vf_id:%u\n", - vsi->vsi_num, vsi->vf_id); + vsi->vsi_num, vsi->vf->vf_id); else - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Unable to add filter because could not determine TUN type. "); + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because could not determine TUN type. 
"); ret = -EINVAL; goto exit; } } rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act; - if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) { - if (!ch_vsi) { - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Unable to add filter because specified destination doesn't exist"); - ret = -EINVAL; - goto exit; - } - - /* dest_vsi is preset, means it is from virtchnl message */ - if (tc_fltr->dest_vsi) { - if (vsi->type != ICE_VSI_VF || - tc_fltr->dest_vsi->type != ICE_VSI_VF) { - dev_err(dev, "Unexpected VSI(vf_id:%u) type: %u\n", - vsi->vf_id, vsi->type); - ret = -EINVAL; - goto exit; - } - vf = &pf->vf[vsi->vf_id]; - if (!vf) { - dev_err(dev, "VF is NULL for VSI->type: ICE_VF_VSI and vf_id %d\n", - vsi->vf_id); - ret = -EINVAL; - goto exit; - } - vf_ch = &vf->ch[tc_fltr->action.tc_class]; - - fltr_type = (enum ice_channel_fltr_type) - vf_ch->fltr_type; - } else if (ch_vsi->ch) { - fltr_type = ch_vsi->ch->fltr_type; - } else { - dev_err(dev, "Can't add switch rule, neither dest_vsi is valid now VSI channel but tc_class sepcified is %u\n", - tc_fltr->action.tc_class); - ret = -EINVAL; - goto exit; - } - - /* perform fltr_type check for channel (aka ADQ) VSI */ - ret = ice_chnl_fltr_type_chk(pf, tc_fltr, &fltr_type); - if (ret) { - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Unable to add filter because filter type check failed"); - dev_err(dev, "Unable to add filter because filter type check failed"); - ret = -EINVAL; - goto exit; - } - - /* Code is applicable only for PF ADQ, for VF ADQ - such - * checks to be handled by VF driver - */ - if (ch_vsi && (ch_vsi->type == ICE_VSI_PF || - ch_vsi->type == ICE_VSI_CHNL)) { - ret = ice_detect_filter_conflict(pf, tc_fltr); - if (ret) - goto exit; - } - - if (tc_fltr->dest_vsi) { - if (vf_ch && !fltr_type) - vf_ch->fltr_type = fltr_type; - } else if (ch_vsi->ch) { - ch_vsi->ch->fltr_type = fltr_type; - } + if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI) { rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI; -#ifdef __CHECKER__ - /* cppcheck-suppress nullPointerRedundantCheck */ -#endif /* _CHECKER__ */ rule_info.sw_act.vsi_handle = ch_vsi->idx; - rule_info.priority = 7; + rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; rule_info.sw_act.src = hw->pf_id; rule_info.rx = true; dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n", - tc_fltr->action.tc_class, + tc_fltr->action.fwd.tc.tc_class, rule_info.sw_act.vsi_handle, lkups_cnt); + } else if (tc_fltr->action.fltr_act == ICE_FWD_TO_Q) { + rule_info.sw_act.fltr_act = ICE_FWD_TO_Q; + /* HW queue number in global space */ + rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue; + rule_info.sw_act.vsi_handle = ch_vsi->idx; + rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE; + rule_info.sw_act.src = hw->pf_id; + rule_info.rx = true; + dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n", + tc_fltr->action.fwd.q.queue, + tc_fltr->action.fwd.q.hw_queue, + lkups_cnt); } else { rule_info.sw_act.flag |= ICE_FLTR_TX; - rule_info.sw_act.src = vsi->idx; + /* In case of Tx (LOOKUP_TX), src needs to be src VSI */ + rule_info.sw_act.src = ch_vsi->idx; + /* 'Rx' is false, direction of rule(LOOKUPTRX) */ rule_info.rx = false; + rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; } + rule_info.add_dir_lkup = false; + /* specify the cookie as filter_rule_id */ rule_info.fltr_rule_id = tc_fltr->cookie; - status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); - if (status == ICE_ERR_ALREADY_EXISTS) { - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Unable to add filter because it 
already exist"); + ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); + if (ret == -EEXIST) { + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because it already exist"); ret = -EINVAL; goto exit; - } else if (status) { - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Unable to add filter due to error"); - ret = -EIO; + } else if (ret) { + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter due to error"); goto exit; } @@ -1048,19 +1308,14 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, */ tc_fltr->rid = rule_added.rid; tc_fltr->rule_id = rule_added.rule_id; - if (tc_fltr->action.tc_class > 0 && ch_vsi) { - /* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and - * for PF ADQ filter, it is not yet set in tc_fltr, - * hence store the dest_vsi ptr in tc_fltr - */ - if (ch_vsi->type == ICE_VSI_CHNL) - tc_fltr->dest_vsi = ch_vsi; + if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI || + tc_fltr->action.fltr_act == ICE_FWD_TO_Q) { + tc_fltr->dest_vsi_handle = rule_added.vsi_handle; + tc_fltr->dest_vsi = ch_vsi; /* keep track of advanced switch filter for - * destination VSI (channel VSI) + * destination VSI */ ch_vsi->num_chnl_fltr++; - /* in this case, dest_id is VSI handle (sw handle) */ - tc_fltr->dest_id = rule_added.vsi_handle; /* keeps track of channel filters for PF VSI */ if (vsi->type == ICE_VSI_PF && @@ -1068,10 +1323,17 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, ICE_TC_FLWR_FIELD_ENC_DST_MAC))) pf->num_dmac_chnl_fltrs++; } - dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n", - lkups_cnt, flags, - tc_fltr->action.tc_class, rule_added.rid, - rule_added.rule_id, rule_added.vsi_handle); + if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI) { + dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n", + lkups_cnt, flags, + tc_fltr->action.fwd.tc.tc_class, rule_added.rid, + rule_added.rule_id, rule_added.vsi_handle); + } else if (tc_fltr->action.fltr_act == ICE_FWD_TO_Q) { + dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to queue: %u (HW queue %u) , rid %u, rule_id %u\n", + lkups_cnt, flags, tc_fltr->action.fwd.q.queue, + tc_fltr->action.fwd.q.hw_queue, rule_added.rid, + rule_added.rule_id); + } exit: kfree(list); return ret; @@ -1127,14 +1389,13 @@ ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match, */ if (ipv6_addr_loopback(&match->key->dst) || ipv6_addr_loopback(&match->key->src)) { - NL_SET_ERR_MSG_MOD(fltr->extack, "Bad ipv6, addr is LOOPBACK"); + NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK"); return -EINVAL; } /* if src/dest IPv6 address is *,* error */ if (ipv6_addr_any(&match->mask->dst) && ipv6_addr_any(&match->mask->src)) { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Bad src/dest IPV6, addr is any"); + NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any"); return -EINVAL; } if (!ipv6_addr_any(&match->mask->dst)) { @@ -1203,85 +1464,35 @@ ice_tc_set_port(struct flow_match_ports match, } #if defined(HAVE_TC_FLOWER_ENC) && defined(HAVE_TC_INDIR_BLOCK) -/** - * ice_is_tnl_gtp - detect if tunnel type is GTP or not - * @tunnel_dev: ptr to tunnel device - * @rule: ptr to flow_rule - * - * If curr_tnl_type is TNL_LAST and "flow_rule" is non-NULL, then - * check if enc_dst_port is well known GTP port (2152) - * if so - return true (indicating that tunnel type is GTP), otherwise false. 
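[Reference sketch, not part of the patch] The ice_tc_set_ipv6() hunk above rejects loopback addresses and all-zero source/destination masks. A user-space analogue of the same sanity check, using the POSIX address macros instead of the kernel's ipv6_addr_loopback()/ipv6_addr_any():

    #include <stdbool.h>
    #include <netinet/in.h>

    /* A flower IPv6 match is rejected when either key is the loopback
     * address, or when both masks are unspecified (nothing to match on).
     */
    static bool ipv6_match_is_sane(const struct in6_addr *dst_key,
                                   const struct in6_addr *src_key,
                                   const struct in6_addr *dst_mask,
                                   const struct in6_addr *src_mask)
    {
            if (IN6_IS_ADDR_LOOPBACK(dst_key) || IN6_IS_ADDR_LOOPBACK(src_key))
                    return false;
            if (IN6_IS_ADDR_UNSPECIFIED(dst_mask) &&
                IN6_IS_ADDR_UNSPECIFIED(src_mask))
                    return false;
            return true;
    }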
- */ -static bool -ice_is_tnl_gtp(struct net_device *tunnel_dev, - struct flow_rule *rule) +static bool ice_is_tunnel_supported_rule(struct flow_rule *rule) { - /* if flow_rule is non-NULL, proceed with detecting possibility - * of GTP tunnel. Unlike VXLAN and GENEVE, there is no such API - * like netif_is_gtp since GTP is not natively supported in kernel - */ - if (rule && (!is_vlan_dev(tunnel_dev))) { - struct flow_match_ports match; - u16 enc_dst_port; - - if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { - netdev_err(tunnel_dev, - "Tunnel HW offload is not supported, ENC_PORTs are not specified\n"); - return false; - } - - /* get ENC_PORTS info */ - flow_rule_match_enc_ports(rule, &match); - enc_dst_port = be16_to_cpu(match.key->dst); - - /* Outer UDP port is GTP well known port, - * if 'enc_dst_port' matched with GTP wellknown port, - * return true from this function. - */ - if (enc_dst_port != ICE_GTP_TNL_WELLKNOWN_PORT) { - netdev_err(tunnel_dev, - "Tunnel HW offload is not supported for non-GTP tunnel, ENC_DST_PORT is %u\n", - enc_dst_port); - return false; - } - - /* all checks passed including outer UDP port to be qualified - * for GTP tunnel - */ - return true; - } - return false; + return (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)); } -/** - * ice_tc_tun_get_type - get the tunnel type - * @tunnel_dev: ptr to tunnel device - * @rule: ptr to flow_rule - * - * This function detects appropriate tunnel_type if specified device is - * tunnel device such as vxlan/geneve othertwise it tries to detect - * tunnel type based on outer GTP port (2152) - */ -int -ice_tc_tun_get_type(struct net_device *tunnel_dev, - struct flow_rule *rule) +static struct net_device * +ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule) { -#ifdef HAVE_VXLAN_TYPE -#if IS_ENABLED(CONFIG_VXLAN) - if (netif_is_vxlan(tunnel_dev)) - return TNL_VXLAN; -#endif /* HAVE_VXLAN_TYPE */ -#elif defined(HAVE_GENEVE_TYPE) -#if IS_ENABLED(CONFIG_GENEVE) - if (netif_is_geneve(tunnel_dev)) - return TNL_GENEVE; -#endif -#endif /* HAVE_GENEVE_TYPE */ - /* detect possibility of GTP tunnel type based on input */ - if (ice_is_tnl_gtp(tunnel_dev, rule)) - return TNL_GTP; +#ifdef HAVE_TC_FLOW_RULE_INFRASTRUCTURE + struct flow_action_entry *act; + int i; - return TNL_LAST; + if (ice_is_tunnel_supported(dev, rule)) + return dev; + + flow_action_for_each(i, act, &rule->action) { + if (act->id == FLOW_ACTION_REDIRECT && + ice_is_tunnel_supported(act->dev, rule)) + return act->dev; + } +#endif /* HAVE_TC_FLOW_RULE_INFRASTRUCTURE */ + + if (ice_is_tunnel_supported_rule(rule)) + return dev; + + return NULL; } /** @@ -1366,8 +1577,11 @@ ice_tc_tun_parse(struct net_device *filter_dev, struct ice_vsi *vsi, dev = ice_pf_to_dev(pf); tunnel_type = ice_tc_tun_get_type(filter_dev, rule); - /* VXLAN and GTP tunnel are supported now */ - if (tunnel_type == TNL_VXLAN || tunnel_type == TNL_GTP) { + if (tunnel_type == TNL_VXLAN || tunnel_type == TNL_GTP || +#ifdef HAVE_GTP_SUPPORT + tunnel_type == TNL_GTPU || tunnel_type == TNL_GTPC || +#endif /* HAVE_GTP_SUPPORT */ + tunnel_type == TNL_GENEVE || tunnel_type == TNL_GRETAP) { err = ice_tc_tun_info(pf, f, fltr, tunnel_type); if (err) { dev_err(dev, "Failed to parse tunnel (tunnel_type %u) attributes\n", @@ -1384,6 +1598,42 @@ ice_tc_tun_parse(struct net_device *filter_dev, 
struct ice_vsi *vsi, return err; } +#ifdef HAVE_GTP_SUPPORT +/** + * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C + * @match: Flow match structure + * @fltr: Pointer to filter structure + * + * GTP-C/GTP-U is selected based on destination port number (enc_dst_port). + * Before calling this funtcion, fltr->tunnel_type should be set to TNL_GTPU, + * therefore making GTP-U the default choice (when destination port number is + * not specified). + */ +static int +ice_parse_gtp_type(struct flow_match_ports match, + struct ice_tc_flower_fltr *fltr) +{ + u16 dst_port; + + if (match.key->dst) { + dst_port = be16_to_cpu(match.key->dst); + + switch (dst_port) { + case GTP1U_PORT: + break; + case ICE_GTPC_PORT: + fltr->tunnel_type = TNL_GTPC; + break; + default: + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number"); + return -EINVAL; + } + } + + return 0; +} +#endif /* HAVE_GTP_SUPPORT */ + /** * ice_parse_tunnel_attr - Parse tunnel attributes from TC flower filter * @filter_dev: Pointer to device on which filter is being added @@ -1404,8 +1654,7 @@ ice_parse_tunnel_attr(struct net_device *filter_dev, struct ice_vsi *vsi, err = ice_tc_tun_parse(filter_dev, vsi, f, fltr, headers); if (err) { - NL_SET_ERR_MSG_MOD(fltr->extack, - "failed to parse tunnel attributes"); + NL_SET_ERR_MSG_MOD(fltr->extack, "failed to parse tunnel attributes"); return err; } @@ -1426,17 +1675,25 @@ ice_parse_tunnel_attr(struct net_device *filter_dev, struct ice_vsi *vsi, return -EINVAL; } -#ifdef HAVE_TC_FLOWER_ENC_IP +#ifdef HAVE_FLOW_DISSECTOR_KEY_ENC_IP if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { struct flow_match_ip match; flow_rule_match_enc_ip(rule, &match); - headers->l3_key.tos = match.key->tos; - headers->l3_key.ttl = match.key->ttl; - headers->l3_mask.tos = match.mask->tos; - headers->l3_mask.ttl = match.mask->ttl; + + if (match.mask->tos) { + fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TOS; + headers->l3_key.tos = match.key->tos; + headers->l3_mask.tos = match.mask->tos; + } + + if (match.mask->ttl) { + fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TTL; + headers->l3_key.ttl = match.key->ttl; + headers->l3_mask.ttl = match.mask->ttl; + } } -#endif /* HAVE_TC_FLOWER_ENC_IP */ +#endif /* HAVE_FLOW_DISSECTOR_KEY_ENC_IP */ if (fltr->tunnel_type == TNL_GTP && flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { @@ -1447,6 +1704,33 @@ ice_parse_tunnel_attr(struct net_device *filter_dev, struct ice_vsi *vsi, if (ice_tc_set_port(match, fltr, headers, true)) return -EINVAL; } + +#ifdef HAVE_GTP_SUPPORT + if ((fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC) && + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_enc_ports(rule, &match); + + if (ice_parse_gtp_type(match, fltr)) + return -EINVAL; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) { + struct flow_match_enc_opts match; + + flow_rule_match_enc_opts(rule, &match); + + memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0], + sizeof(struct gtp_pdu_session_info)); + + memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0], + sizeof(struct gtp_pdu_session_info)); + + fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS; + } +#endif /* HAVE_GTP_SUPPORT */ + return 0; } #endif /* HAVE_TC_FLOWER_ENC && HAVE_TC_INDIR_BLOCK */ @@ -1473,8 +1757,13 @@ ice_parse_cls_flower(struct net_device __always_unused *filter_dev, { struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; struct flow_rule *rule = flow_cls_offload_flow_rule(f); - struct 
flow_dissector *dissector = rule->match.dissector; u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; + struct flow_dissector *dissector; +#if defined(HAVE_TC_FLOWER_ENC) && defined(HAVE_TC_INDIR_BLOCK) + struct net_device *tunnel_dev; +#endif /* HAVE_TC_FLOWER_ENC && HAVE_TC_INDIR_BLOCK */ + + dissector = rule->match.dissector; if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | @@ -1486,6 +1775,9 @@ ice_parse_cls_flower(struct net_device __always_unused *filter_dev, #ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS BIT(FLOW_DISSECTOR_KEY_VLAN) | #endif +#ifdef HAVE_FLOW_DISSECTOR_KEY_CVLAN + BIT(FLOW_DISSECTOR_KEY_CVLAN) | +#endif /* HAVE_FLOW_DISSECTOR_KEY_CVLAN */ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | #ifdef HAVE_TC_FLOWER_ENC @@ -1493,27 +1785,36 @@ ice_parse_cls_flower(struct net_device __always_unused *filter_dev, BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | +#ifdef HAVE_GTP_SUPPORT + BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | +#endif /* HAVE_GTP_SUPPORT */ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | -#ifdef HAVE_TC_FLOWER_ENC_IP +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP + BIT(FLOW_DISSECTOR_KEY_IP) | +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ +#ifdef HAVE_FLOW_DISSECTOR_KEY_ENC_IP BIT(FLOW_DISSECTOR_KEY_ENC_IP) | -#endif /* HAVE_TC_FLOWER_ENC_IP */ +#endif /* HAVE_FLOW_DISSECTOR_KEY_ENC_IP */ #endif /* HAVE_TC_FLOWER_ENC */ +#ifdef HAVE_FLOW_DISSECTOR_KEY_PPPOE + BIT(FLOW_DISSECTOR_KEY_PPPOE) | +#endif /* HAVE_FLOW_DISSECTOR_KEY_PPPOE */ BIT(FLOW_DISSECTOR_KEY_PORTS))) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used"); return -EOPNOTSUPP; } #if defined(HAVE_TC_FLOWER_ENC) && defined(HAVE_TC_INDIR_BLOCK) - if ((flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) || - flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) || - flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) || - flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS))) { + tunnel_dev = ice_get_tunnel_device(filter_dev, rule); + + if (tunnel_dev) { int err; + filter_dev = tunnel_dev; + err = ice_parse_tunnel_attr(filter_dev, vsi, f, fltr, headers); if (err) { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Failed to parse TC flower tunnel attributes"); + NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes"); return err; } @@ -1540,13 +1841,13 @@ ice_parse_cls_flower(struct net_device __always_unused *filter_dev, n_proto_key = 0; n_proto_mask = 0; } else { - if (!ice_is_adq_active(vsi->back)) - fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID; + fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID; } headers->l2_key.n_proto = cpu_to_be16(n_proto_key); headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask); headers->l3_key.ip_proto = match.key->ip_proto; + headers->l3_mask.ip_proto = match.mask->ip_proto; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { @@ -1587,10 +1888,8 @@ ice_parse_cls_flower(struct net_device __always_unused *filter_dev, if (mask->vlan_id) { if (mask->vlan_id == VLAN_VID_MASK) { fltr->flags |= ICE_TC_FLWR_FIELD_VLAN; - fltr->flags &= ~ICE_TC_FLWR_FIELD_ETH_TYPE_ID; } else { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Bad VLAN mask"); + NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask"); return -EINVAL; } } @@ -1622,10 +1921,8 @@ ice_parse_cls_flower(struct net_device __always_unused *filter_dev, if (match.mask->vlan_id) { if (match.mask->vlan_id == VLAN_VID_MASK) { fltr->flags |= ICE_TC_FLWR_FIELD_VLAN; - fltr->flags &= ~ICE_TC_FLWR_FIELD_ETH_TYPE_ID; } else { - 
NL_SET_ERR_MSG_MOD(fltr->extack, - "Bad VLAN mask"); + NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask"); return -EINVAL; } } @@ -1639,6 +1936,55 @@ ice_parse_cls_flower(struct net_device __always_unused *filter_dev, } #endif /* HAVE_TC_FLOWER_VLAN_IN_TAGS */ +#ifdef HAVE_FLOW_DISSECTOR_KEY_CVLAN + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { + struct flow_match_vlan match; + + if (!ice_is_dvm_ena(&vsi->back->hw)) { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Double VLAN mode is not enabled"); + return -EINVAL; + } + + flow_rule_match_cvlan(rule, &match); + + if (match.mask->vlan_id) { + if (match.mask->vlan_id == VLAN_VID_MASK) { + fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN; + } else { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Bad CVLAN mask"); + return -EINVAL; + } + } + + headers->cvlan_hdr.vlan_id = + cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK); +#ifdef HAVE_FLOW_DISSECTOR_VLAN_PRIO + if (match.mask->vlan_priority) + headers->cvlan_hdr.vlan_prio = match.key->vlan_priority; +#endif + } +#endif /* HAVE_FLOW_DISSECTOR_KEY_CVLAN */ + +#ifdef HAVE_FLOW_DISSECTOR_KEY_PPPOE + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) { + struct flow_match_pppoe match; + + flow_rule_match_pppoe(rule, &match); + n_proto_key = ice_tc_set_pppoe(&match, fltr, headers); + + /* If ethertype equals ETH_P_PPP_SES, n_proto might be + * overwritten by encapsulated protocol (ppp_proto field) or set + * to 0. To correct this, flow_match_pppoe provides the type + * field, which contains the actual ethertype (ETH_P_PPP_SES). + */ + headers->l2_key.n_proto = cpu_to_be16(n_proto_key); + headers->l2_mask.n_proto = cpu_to_be16(0xFFFF); + fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID; + } +#endif /* HAVE_FLOW_DISSECTOR_KEY_PPPOE */ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_match_control match; @@ -1663,6 +2009,26 @@ ice_parse_cls_flower(struct net_device __always_unused *filter_dev, return -EINVAL; } +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + struct flow_match_ip match; + + flow_rule_match_ip(rule, &match); + + if (match.mask->tos) { + fltr->flags |= ICE_TC_FLWR_FIELD_IP_TOS; + headers->l3_key.tos = match.key->tos; + headers->l3_mask.tos = match.mask->tos; + } + + if (match.mask->ttl) { + fltr->flags |= ICE_TC_FLWR_FIELD_IP_TTL; + headers->l3_key.ttl = match.key->ttl; + headers->l3_mask.ttl = match.mask->ttl; + } + } +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; @@ -1674,8 +2040,7 @@ ice_parse_cls_flower(struct net_device __always_unused *filter_dev, case IPPROTO_UDP: break; default: - NL_SET_ERR_MSG_MOD(fltr->extack, - "Only UDP and TCP transport are supported"); + NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported"); return -EINVAL; } } @@ -1707,13 +2072,11 @@ ice_add_remove_tc_flower_dflt_fltr(struct ice_vsi *vsi, if (add) { err = ice_fltr_add_mac(vsi, dst_mac, act); if (err) - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Could not add MAC filters"); + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Could not add MAC filters"); } else { err = ice_fltr_remove_mac(vsi, dst_mac, act); if (err) - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Could not remove MAC filters"); + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Could not remove MAC filters"); } break; case ICE_TC_FLWR_FLTR_FLAGS_VLAN: @@ -1722,34 +2085,29 @@ ice_add_remove_tc_flower_dflt_fltr(struct ice_vsi *vsi, ICE_VLAN(ETH_P_8021Q, vlan_id, 0, act); err = vlan_ops->add_vlan(vsi, &vlan); if 
(err) - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Could not add VLAN filters"); + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Could not add VLAN filters"); } else { struct ice_vlan vlan = ICE_VLAN(ETH_P_8021Q, vlan_id, 0, act); err = vlan_ops->del_vlan(vsi, &vlan); if (err) - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Could not delete VLAN filters"); + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Could not delete VLAN filters"); } break; case ICE_TC_FLWR_FLTR_FLAGS_DST_MAC_VLAN: if (add) { err = ice_fltr_add_mac_vlan(vsi, dst_mac, vlan_id, act); if (err) - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Could not add MAC VLAN filters"); + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Could not add MAC VLAN filters"); } else { err = ice_fltr_remove_mac_vlan(vsi, dst_mac, vlan_id, act); if (err) - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Could not remove MAC VLAN filters"); + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Could not remove MAC VLAN filters"); } break; default: - NL_SET_ERR_MSG_MOD(tc_fltr->extack, - "Not a default filter type"); + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Not a default filter type"); err = -EOPNOTSUPP; break; } @@ -1786,51 +2144,79 @@ ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) #ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO /** - * ice_handle_tclass_action - Support directing to a traffic class + * ice_handle_tclass_action - Support directing to a traffic class or queue * @vsi: Pointer to VSI * @cls_flower: Pointer to TC flower offload structure * @fltr: Pointer to TC flower filter structure * - * Support directing traffic to a traffic class + * Support directing traffic to a traffic class or queue */ static int ice_handle_tclass_action(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower, struct ice_tc_flower_fltr *fltr) { - int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); - struct ice_vsi *main_vsi; + unsigned int nrx = TC_H_MIN(cls_flower->classid); + u32 num_tc; + int tc; - if (tc < 0) { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unable to add filter because specified destination is invalid"); - return -EINVAL; - } - if (!tc) { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unable to add filter because of invalid destination"); - return -EINVAL; - } + num_tc = (u32)netdev_get_num_tc(vsi->netdev); - if (!(vsi->all_enatc & BIT(tc))) { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unable to add filter because of non-existence destination"); - return -EINVAL; - } + /* There are two regions which will have valid "classid" values: + * 1. The first region will have a classid value of 1 through + * num_tx_queues (i.e forward to queue). + * 2. The second region represents the hardware traffic classes. These + * are represented by classid values of TC_H_MIN_PRIORITY through + * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1. 
(i.e forward to TC) + */ + if (nrx < TC_H_MIN_PRIORITY) { + struct ice_hw *hw = &vsi->back->hw; + u32 queue, global_qid; + /* user specified queue, hence action is forward to queue */ + if (nrx > vsi->num_rxq) { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unable to add filter because specified queue is invalid"); + return -ENXIO; + } + /* since nrx is 1 based */ + queue = nrx - 1; - /* Redirect to a TC class or Queue Group */ - main_vsi = ice_get_main_vsi(vsi->back); - if (!main_vsi || !main_vsi->netdev) { + /* forward to queue */ + fltr->action.fltr_act = ICE_FWD_TO_Q; + fltr->action.fwd.q.queue = queue; + + /* determine corresponding HW queue */ + global_qid = hw->func_caps.common_cap.rxq_first_id + queue; + fltr->action.fwd.q.hw_queue = global_qid; + } else if ((nrx - TC_H_MIN_PRIORITY) < num_tc) { + /* user specified hw_tc (it must be non-zero for ADQ TC, hence + * action is forward to "hw_tc (aka ADQ channel number)" + */ + tc = nrx - TC_H_MIN_PRIORITY; + if (tc < ICE_CHNL_START_TC) { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unable to add filter because of unsupported destination"); + return -EOPNOTSUPP; + } + + if (!(vsi->all_enatc & BIT(tc))) { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unable to add filter because of non-existence destination"); + return -EINVAL; + } + /* forward to hw_tc (aka ADQ VSI) */ + fltr->action.fltr_act = ICE_FWD_TO_VSI; + fltr->action.fwd.tc.tc_class = tc; + } else { NL_SET_ERR_MSG_MOD(fltr->extack, - "Unable to add filter because of invalid netdevice"); + "Unable to add filter because user specified neither queue nor hw_tc as forward action"); return -EINVAL; } if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) && (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))) { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination"); + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination"); return -EOPNOTSUPP; } @@ -1842,52 +2228,36 @@ ice_handle_tclass_action(struct ice_vsi *vsi, * this code won't do anything * 2. For non-tunnel, if user didn't specify MAC address, add implicit * dest MAC to be lower netdev's active unicast MAC address - * 3. For tunnel, as of now tc-filter thru flower classifier doesn't + * 3. For tunnel, as of now TC-filter thru flower classifier doesn't * have provision for user to specify outer DMAC, hence driver to * implicitly add outer dest MAC to be lower netdev's active unicast * MAC address. */ - if (fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) { + if (ice_is_tunnel_fltr(fltr)) { if (!(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)) { ether_addr_copy(fltr->outer_headers.l2_key.dst_mac, - main_vsi->netdev->dev_addr); + vsi->netdev->dev_addr); eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac); fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC; } } else if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) { ether_addr_copy(fltr->outer_headers.l2_key.dst_mac, - main_vsi->netdev->dev_addr); + vsi->netdev->dev_addr); eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac); fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC; } - /* validate specified dest MAC address, make sure either it belongs to - * lower netdev or any of non-offloaded MACVLAN. Non-offloaded MACVLANs - * MAC address are added as unicast MAC filter destined to main VSI. 
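/* Illustrative sketch: how the two "classid" regions described above decode
 * into either a 0-based Rx queue or a hardware traffic class. TC_H_MIN()
 * and TC_H_MIN_PRIORITY come from <linux/pkt_sched.h>; the struct and
 * helper names here are made up for the example and are not part of the
 * driver.
 */
#include <linux/errno.h>
#include <linux/pkt_sched.h>
#include <linux/types.h>

struct example_fwd_dest {
	bool to_queue;		/* true: forward to Rx queue, else to TC */
	unsigned int index;	/* 0-based queue index or TC number */
};

static int example_decode_classid(u32 classid, unsigned int num_rxq,
				  unsigned int num_tc,
				  struct example_fwd_dest *dest)
{
	unsigned int nrx = TC_H_MIN(classid);

	if (nrx >= TC_H_MIN_PRIORITY) {
		/* region 2: hardware traffic classes */
		if (nrx - TC_H_MIN_PRIORITY >= num_tc)
			return -EINVAL;
		dest->to_queue = false;
		dest->index = nrx - TC_H_MIN_PRIORITY;
		return 0;
	}

	/* region 1: queue numbers, 1-based in the classid */
	if (!nrx || nrx > num_rxq)
		return -ENXIO;
	dest->to_queue = true;
	dest->index = nrx - 1;
	return 0;
}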
- */ - if (!ice_mac_fltr_exist(&main_vsi->back->hw, - fltr->outer_headers.l2_key.dst_mac, - main_vsi->idx)) { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unable to add filter because legacy MAC filter for specified destination doesn't exist"); - return -EINVAL; - } - /* Make sure VLAN is already added to main VSI, before allowing ADQ to * add a VLAN based filter such as MAC + VLAN + L4 port. */ if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) { u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id); - if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id, - main_vsi->idx)) { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unable to add filter because legacy VLAN filter for specified destination doesn't exist"); + if (!ice_vlan_fltr_exist(&vsi->back->hw, vlan_id, vsi->idx)) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because legacy VLAN filter for specified destination doesn't exist"); return -EINVAL; } } - fltr->action.fltr_act = ICE_FWD_TO_VSI; - fltr->action.tc_class = tc; return 0; } @@ -1947,17 +2317,17 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi, #else list_for_each_entry_safe(tc_act, temp, &(exts)->actions, list) { #endif /* HAVE_TCF_EXTS_TO_LIST */ -#ifdef HAVE_TC_FLOW_RULE_INFRASTRUCTURE if (ice_is_eswitch_mode_switchdev(vsi->back)) { - int err = ice_eswitch_tc_parse_action(fltr, act); +#ifdef HAVE_TC_FLOW_RULE_INFRASTRUCTURE + int err = ice_eswitch_tc_parse_action(fltr, act); +#else + int err = ice_eswitch_tc_parse_action(fltr, tc_act); +#endif if (err) return err; + continue; } -#else - if (ice_is_eswitch_mode_switchdev(vsi->back)) - return -EINVAL; -#endif /* HAVE_TC_FLOW_RULE_INFRASTRUCTURE */ /* Allow only one rule per filter */ /* Drop action */ @@ -1966,8 +2336,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi, #else if (is_tcf_gact_shot(tc_act)) { #endif - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unsupported action DROP"); + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP"); return -EINVAL; } fltr->action.fltr_act = ICE_FWD_TO_VSI; @@ -1996,18 +2365,16 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_rem.rid = fltr->rid; rule_rem.rule_id = fltr->rule_id; - rule_rem.vsi_handle = fltr->dest_id; + rule_rem.vsi_handle = fltr->dest_vsi_handle; err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem); } if (err) { - if (err == ICE_ERR_DOES_NOT_EXIST) { - NL_SET_ERR_MSG_MOD(fltr->extack, - "filter does not exist\n"); + if (err == -ENOENT) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist"); return -ENOENT; } - NL_SET_ERR_MSG_MOD(fltr->extack, - "Failed to delete TC flower filter"); + NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter"); return -EIO; } @@ -2016,16 +2383,8 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) */ if (fltr->dest_vsi) { if (fltr->dest_vsi->type == ICE_VSI_CHNL) { - struct ice_channel *ch = fltr->dest_vsi->ch; - fltr->dest_vsi->num_chnl_fltr--; - /* reset filter type for channel if channel filter - * count reaches zero - */ - if (!fltr->dest_vsi->num_chnl_fltr && ch) - ch->fltr_type = ICE_CHNL_FLTR_TYPE_INVALID; - /* keeps track of channel filters for PF VSI */ if (vsi->type == ICE_VSI_PF && (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC | @@ -2043,7 +2402,7 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) * @f: Pointer to flower offload structure * @__fltr: Pointer to struct ice_tc_flower_fltr * - * This function parses tc-flower input fields, parses action, + * This function parses TC-flower input fields, parses 
action, * and adds a filter. */ #ifdef HAVE_TC_INDIR_BLOCK @@ -2135,7 +2494,7 @@ ice_add_cls_flower(struct net_device __always_unused *netdev, struct net_device *vsi_netdev = vsi->netdev; struct ice_tc_flower_fltr *fltr; struct ice_pf *pf = vsi->back; - int err = 0; + int err; if (ice_is_reset_in_progress(pf->state)) return -EBUSY; @@ -2160,15 +2519,12 @@ ice_add_cls_flower(struct net_device __always_unused *netdev, * Avoid triggering explicit error in this case. */ if (netdev == vsi_netdev) - NL_SET_ERR_MSG_MOD(extack, - "can't apply TC flower filters, turn ON hw-tc-offload and try again"); + NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again"); #else - NL_SET_ERR_MSG_MOD(extack, - "can't apply TC flower filters, turn ON hw-tc-offload and try again"); + NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again"); #endif /* HAVE_TC_INDIR_BLOCK */ #else /* !HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK */ - netdev_err(vsi_netdev, - "can't apply TC flower filters, turn ON hw-tc-offload and try again\n"); + netdev_err(vsi_netdev, "can't apply TC flower filters, turn ON hw-tc-offload and try again\n"); #endif /* HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK */ return -EINVAL; } @@ -2177,17 +2533,15 @@ ice_add_cls_flower(struct net_device __always_unused *netdev, fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie); if (fltr) { #ifdef HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK - NL_SET_ERR_MSG_MOD(extack, - "filter cookie already exists, ignoring"); + NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring"); #else - netdev_warn(vsi_netdev, - "filter cookie %lx already exists, ignoring\n", + netdev_warn(vsi_netdev, "filter cookie %lx already exists, ignoring\n", fltr->cookie); #endif /* HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK */ return -EEXIST; } - /* prep and add tc-flower filter in HW */ + /* prep and add TC-flower filter in HW */ err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr); if (err) return err; @@ -2234,8 +2588,7 @@ ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower) return 0; #ifdef HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK - NL_SET_ERR_MSG_MOD(cls_flower->common.extack, - "failed to delete TC flower filter because unable to find it"); + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it"); #else dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter because unable to find it\n"); @@ -2261,7 +2614,7 @@ ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower) } /** - * ice_replay_tc_fltrs - replay tc filters + * ice_replay_tc_fltrs - replay TC filters * @pf: pointer to PF struct */ void ice_replay_tc_fltrs(struct ice_pf *pf) diff --git a/drivers/thirdparty/ice/ice_tc_lib.h b/drivers/thirdparty/ice/ice_tc_lib.h index 6a6f4566e1fb..91feab8af868 100644 --- a/drivers/thirdparty/ice/ice_tc_lib.h +++ b/drivers/thirdparty/ice/ice_tc_lib.h @@ -23,6 +23,20 @@ #define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15) #define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16) #define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17) +#ifdef HAVE_GTP_SUPPORT +#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18) +#endif /* HAVE_GTP_SUPPORT */ +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP +#define ICE_TC_FLWR_FIELD_IP_TOS BIT(19) +#define ICE_TC_FLWR_FIELD_IP_TTL BIT(20) +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ +#ifdef HAVE_FLOW_DISSECTOR_KEY_ENC_IP +#define ICE_TC_FLWR_FIELD_ENC_IP_TOS BIT(21) +#define ICE_TC_FLWR_FIELD_ENC_IP_TTL BIT(22) +#endif /* HAVE_FLOW_DISSECTOR_KEY_ENC_IP 
*/ +#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(23) +#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(24) +#define ICE_TC_FLWR_FIELD_CVLAN BIT(25) /* TC flower supported filter match */ #define ICE_TC_FLWR_FLTR_FLAGS_DST_MAC ICE_TC_FLWR_FIELD_DST_MAC @@ -42,6 +56,11 @@ #define ICE_TC_FLOWER_MASK_16 0xFFFF #define ICE_TC_FLOWER_VNI_MAX 0xFFFFFFU +#if defined(HAVE_FLOW_DISSECTOR_KEY_IP) || defined(HAVE_FLOW_DISSECTOR_KEY_ENC_IP) +#define ICE_IPV6_HDR_TC_OFFSET 20 +#define ICE_IPV6_HDR_TC_MASK GENMASK(27, 20) +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP || HAVE_FLOW_DISSECTOR_KEY_ENC_IP */ + #ifdef HAVE_TC_INDIR_BLOCK struct ice_indr_block_priv { struct net_device *netdev; @@ -51,7 +70,20 @@ struct ice_indr_block_priv { #endif /* HAVE_TC_INDIR_BLOCK */ struct ice_tc_flower_action { - u32 tc_class; + /* forward action specific params */ + union { + struct { + u32 tc_class; /* forward to hw_tc */ + u32 rsvd; + } tc; + struct { + u32 queue; /* forward to queue */ + /* to add filter in HW, it needs absolute queue number + * in global space of queues (between 0...N) + */ + u32 hw_queue; + } q; + } fwd; enum ice_sw_fwd_act_type fltr_act; }; @@ -62,6 +94,11 @@ struct ice_tc_vlan_hdr { #endif }; +struct ice_tc_pppoe_hdr { + __be16 session_id; + __be16 ppp_proto; +}; + struct ice_tc_l2_hdr { u8 dst_mac[ETH_ALEN]; u8 src_mac[ETH_ALEN]; @@ -101,6 +138,8 @@ struct ice_tc_flower_lyr_2_4_hdrs { struct ice_tc_l2_hdr l2_key; struct ice_tc_l2_hdr l2_mask; struct ice_tc_vlan_hdr vlan_hdr; + struct ice_tc_vlan_hdr cvlan_hdr; + struct ice_tc_pppoe_hdr pppoe_hdr; /* L3 (IPv4[6]) layer fields with their mask */ struct ice_tc_l3_hdr l3_key; struct ice_tc_l3_hdr l3_mask; @@ -126,11 +165,11 @@ struct ice_tc_flower_fltr { */ u16 rid; u16 rule_id; - /* this could be queue/vsi_idx (sw handle)/queue_group, depending upon - * destination type + /* VSI handle of the destination VSI (it could be main PF VSI, CHNL_VSI, + * VF VSI) */ - u16 dest_id; - /* if dest_id is vsi_idx, then need to store destination VSI ptr */ + u16 dest_vsi_handle; + /* ptr to destination VSI */ struct ice_vsi *dest_vsi; /* direction of fltr for eswitch use case */ enum ice_eswitch_fltr_direction direction; @@ -140,6 +179,10 @@ struct ice_tc_flower_fltr { struct ice_tc_flower_lyr_2_4_hdrs inner_headers; struct ice_vsi *src_vsi; __be32 tenant_id; +#ifdef HAVE_GTP_SUPPORT + struct gtp_pdu_session_info gtp_pdu_info_keys; + struct gtp_pdu_session_info gtp_pdu_info_masks; +#endif /* HAVE_GTP_SUPPORT */ u32 flags; #define ICE_TC_FLWR_TNL_TYPE_NONE 0xff u8 tunnel_type; @@ -156,12 +199,23 @@ struct ice_tc_flower_fltr { * @f: Pointer to tc-flower filter * * Criteria to determine of given filter is valid channel filter - * or not is based on its "destination". If destination is hw_tc (aka tc_class) - * and it is non-zero, then it is valid channel (aka ADQ) filter + * or not is based on its "destination". + * For forward to VSI action, if destination is valid hw_tc (aka tc_class) + * and in supported range of TCs for ADQ, then return true. + * For forward to queue, as long as dest_vsi is valid and it is of type + * VSI_CHNL (PF ADQ VSI is of type VSI_CHNL), return true. + * NOTE: For forward to queue, correct dest_vsi is still set in tc_fltr based + * on destination queue specified. 
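/* Context for ICE_IPV6_HDR_TC_OFFSET/ICE_IPV6_HDR_TC_MASK above: the IPv6
 * Traffic Class occupies bits 27:20 of the host-order first 32-bit word of
 * the header (4-bit version, 8-bit traffic class, 20-bit flow label).
 * Minimal sketch with an illustrative helper name; the in-tree equivalent
 * is roughly ip6_tclass(ip6_flowinfo(ip6h)) from <net/ipv6.h>.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/ipv6.h>
#include <linux/types.h>

static u8 example_ipv6_traffic_class(const struct ipv6hdr *ip6h)
{
	u32 vtc_flow = be32_to_cpu(*(const __be32 *)ip6h);

	return FIELD_GET(GENMASK(27, 20), vtc_flow);
}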
*/ static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f) { - return !!f->action.tc_class; + if (f->action.fltr_act == ICE_FWD_TO_VSI) + return (f->action.fwd.tc.tc_class >= ICE_CHNL_START_TC && + f->action.fwd.tc.tc_class < ICE_CHNL_MAX_TC); + else if (f->action.fltr_act == ICE_FWD_TO_Q) + return (f->dest_vsi && f->dest_vsi->type == ICE_VSI_CHNL); + + return false; } /** @@ -175,11 +229,11 @@ static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf) int ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr); -#if defined(HAVE_TC_FLOWER_ENC) && defined(HAVE_TC_INDIR_BLOCK) +#if defined(HAVE_TCF_MIRRED_DEV) || defined(HAVE_TC_FLOW_RULE_INFRASTRUCTURE) int ice_tc_tun_get_type(struct net_device *tunnel_dev, struct flow_rule *rule); -#endif /* HAVE_TC_FLOWER_ENC && HAVE_TC_INDIR_BLOCK */ +#endif /* HAVE_TCF_MIRRED_DEC || HAVE_TC_FLOW_RULE_INFRASTRUCTURE */ int #ifdef HAVE_TC_INDIR_BLOCK ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, diff --git a/drivers/thirdparty/ice/ice_tmatch.h b/drivers/thirdparty/ice/ice_tmatch.h new file mode 100644 index 000000000000..f46642d17ed6 --- /dev/null +++ b/drivers/thirdparty/ice/ice_tmatch.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_TMATCH_H_ +#define _ICE_TMATCH_H_ + +static inline +bool ice_ternary_match_byte(u8 key, u8 key_inv, u8 pat) +{ + u8 k1, k2, v; + int i; + + for (i = 0; i < 8; i++) { + k1 = (u8)(key & (1 << i)); + k2 = (u8)(key_inv & (1 << i)); + v = (u8)(pat & (1 << i)); + + if (k1 != 0 && k2 != 0) + continue; + if (k1 == 0 && k2 == 0) + return false; + + if (k1 == v) + return false; + } + + return true; +} + +static inline +bool ice_ternary_match(const u8 *key, const u8 *key_inv, + const u8 *pat, int len) +{ + int i; + + for (i = 0; i < len; i++) + if (!ice_ternary_match_byte(key[i], key_inv[i], pat[i])) + return false; + + return true; +} + +#endif /* _ICE_TMATCH_H_ */ diff --git a/drivers/thirdparty/ice/ice_trace.h b/drivers/thirdparty/ice/ice_trace.h index 3e50808fe490..814a65900cee 100644 --- a/drivers/thirdparty/ice/ice_trace.h +++ b/drivers/thirdparty/ice/ice_trace.h @@ -172,7 +172,7 @@ DECLARE_EVENT_CLASS(ice_tx_template, __entry->buf = buf; __assign_str(devname, ring->netdev->name);), - TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname), + TP_printk("netdev: %s ring: %p desc: %p buf %p", __get_str(devname), __entry->ring, __entry->desc, __entry->buf) ); @@ -200,7 +200,7 @@ DECLARE_EVENT_CLASS(ice_rx_template, __entry->desc = desc; __assign_str(devname, ring->netdev->name);), - TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname), + TP_printk("netdev: %s ring: %p desc: %p", __get_str(devname), __entry->ring, __entry->desc) ); DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq, @@ -224,7 +224,7 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template, __entry->skb = skb; __assign_str(devname, ring->netdev->name);), - TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname), + TP_printk("netdev: %s ring: %p desc: %p skb %p", __get_str(devname), __entry->ring, __entry->desc, __entry->skb) ); @@ -247,7 +247,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template, __entry->skb = skb; __assign_str(devname, ring->netdev->name);), - TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname), + TP_printk("netdev: %s skb: %p ring: %p", __get_str(devname), __entry->skb, __entry->ring) ); @@ -259,6 +259,30 @@ DEFINE_EVENT(ice_xmit_template, name, \ 
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring); DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring_drop); +DECLARE_EVENT_CLASS(ice_tx_tstamp_template, + TP_PROTO(struct sk_buff *skb, int idx), + + TP_ARGS(skb, idx), + + TP_STRUCT__entry(__field(void *, skb) + __field(int, idx)), + + TP_fast_assign(__entry->skb = skb; + __entry->idx = idx;), + + TP_printk("skb %pK idx %d", + __entry->skb, __entry->idx) +); +#define DEFINE_TX_TSTAMP_OP_EVENT(name) \ +DEFINE_EVENT(ice_tx_tstamp_template, name, \ + TP_PROTO(struct sk_buff *skb, int idx), \ + TP_ARGS(skb, idx)) + +DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_request); +DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_complete); +DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_req); +DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_done); + /* End tracepoints */ #endif /* _ICE_TRACE_H_ */ diff --git a/drivers/thirdparty/ice/ice_txrx.c b/drivers/thirdparty/ice/ice_txrx.c index c3c2746fe977..6aa6b7a25780 100644 --- a/drivers/thirdparty/ice/ice_txrx.c +++ b/drivers/thirdparty/ice/ice_txrx.c @@ -3,12 +3,14 @@ /* The driver transmit and receive code */ -#include #include +#include +#include #include "ice_txrx_lib.h" #include "ice_lib.h" #include "ice.h" #include "ice_dcb_lib.h" +#include #ifdef HAVE_XDP_SUPPORT #ifdef HAVE_AF_XDP_ZC_SUPPORT #include "ice_xsk.h" @@ -23,8 +25,9 @@ #include "ice_eswitch.h" #include +#ifndef CONFIG_ICE_USE_SKB #define ICE_RX_HDR_SIZE 256 - +#endif #define FDIR_DESC_RXDID 0x40 #define ICE_FDIR_CLEAN_DELAY 10 @@ -119,8 +122,6 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, static void ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf) { - struct ice_vsi *vsi = ring->vsi; - if (tx_buf->skb) { if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) devm_kfree(ring->dev, tx_buf->raw_buf); @@ -135,11 +136,6 @@ ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf) dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); - if (unlikely(tx_buf->tx_flags & ICE_TX_FLAGS_TSYN)) { - dev_kfree_skb_any(vsi->ptp_tx_skb[tx_buf->ptp_ts_idx]); - vsi->ptp_tx_skb[tx_buf->ptp_ts_idx] = NULL; - tx_buf->ptp_ts_idx = -1; - } } else if (dma_unmap_len(tx_buf, len)) { dma_unmap_page(ring->dev, dma_unmap_addr(tx_buf, dma), @@ -178,9 +174,9 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring) return; /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) + for (i = 0; i < tx_ring->count; i++) { ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); - + } #ifdef HAVE_AF_XDP_ZC_SUPPORT tx_skip_free: #endif /* HAVE_AF_XDP_ZC_SUPPORT */ @@ -216,6 +212,7 @@ void ice_free_tx_ring(struct ice_ring *tx_ring) tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } + } /** @@ -234,6 +231,14 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; + /* get the bql data ready */ +#ifdef HAVE_XDP_SUPPORT + if (!ice_ring_is_xdp(tx_ring)) + netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring)); +#else + netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring)); +#endif /* HAVE_XDP_SUPPORT */ + tx_buf = &tx_ring->tx_buf[i]; tx_desc = ICE_TX_DESC(tx_ring, i); i -= tx_ring->count; @@ -247,6 +252,9 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) if (!eop_desc) break; + /* follow the guidelines of other drivers */ + prefetchw(&tx_buf->skb->users); + smp_rmb(); /* prevent any other reads prior to eop_desc */ ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); @@ -332,8 +340,7 @@ static bool 
ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) return !!budget; #endif /* HAVE_XDP_SUPPORT */ - netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, - total_bytes); + netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes); #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && @@ -342,11 +349,9 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) * sees the new next_to_clean. */ smp_mb(); - if (__netif_subqueue_stopped(tx_ring->netdev, - tx_ring->q_index) && + if (netif_tx_queue_stopped(txring_txq(tx_ring)) && !test_bit(ICE_VSI_DOWN, vsi->state)) { - netif_wake_subqueue(tx_ring->netdev, - tx_ring->q_index); + netif_tx_wake_queue(txring_txq(tx_ring)); ++tx_ring->tx_stats.restart_q; } } @@ -370,7 +375,7 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring) /* warn if we are about to overwrite the pointer */ WARN_ON(tx_ring->tx_buf); tx_ring->tx_buf = - devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count, + devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count, GFP_KERNEL); if (!tx_ring->tx_buf) return -ENOMEM; @@ -421,6 +426,17 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) for (i = 0; i < rx_ring->count; i++) { struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; +#ifdef CONFIG_ICE_USE_SKB + if (!rx_buf->skb) + continue; + + if (rx_buf->dma) + dma_unmap_single(dev, rx_buf->dma, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + dev_kfree_skb(rx_buf->skb); + rx_buf->dma = 0; + rx_buf->skb = NULL; +#else /* CONFIG_ICE_USE_SKB */ if (rx_buf->skb) { dev_kfree_skb(rx_buf->skb); rx_buf->skb = NULL; @@ -448,6 +464,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) rx_buf->page = NULL; rx_buf->page_offset = 0; +#endif /* CONFIG_ICE_USE_SKB */ } #ifdef HAVE_AF_XDP_ZC_SUPPORT @@ -506,7 +523,7 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring) /* warn if we are about to overwrite the pointer */ WARN_ON(rx_ring->rx_buf); rx_ring->rx_buf = - devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count, + devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count, GFP_KERNEL); if (!rx_ring->rx_buf) return -ENOMEM; @@ -545,6 +562,49 @@ err: return -ENOMEM; } +#ifdef CONFIG_ICE_USE_SKB +static bool +ice_alloc_mapped_skb(struct ice_ring *rx_ring, struct ice_rx_buf *bi) +{ + struct sk_buff *skb = bi->skb; + dma_addr_t dma; + + if (unlikely(skb)) + return true; + + /* must not call __napi_alloc_skb with preemption enabled */ + preempt_disable(); + + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + rx_ring->rx_buf_len, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_buf_failed++; + preempt_enable(); + return false; + } + + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + rx_ring->rx_stats.alloc_buf_failed++; + preempt_enable(); + return false; + } + + bi->skb = skb; + bi->dma = dma; + + preempt_enable(); + + return true; +} +#else /* CONFIG_ICE_USE_SKB */ /** * ice_rx_offset - Return expected offset into page to access data * @rx_ring: Ring we are requesting offset of @@ -640,14 +700,16 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, #endif break; default: - bpf_warn_invalid_xdp_action(act); - /* fallthrough -- not supported action */ + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); + fallthrough; 
/* not supported action */ + case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); #ifdef ICE_ADD_PROBES rx_ring->xdp_stats.xdp_unknown++; #endif - /* fallthrough -- handle aborts by dropping frame */ + fallthrough; /* handle aborts by dropping frame */ + case XDP_DROP: result = ICE_XDP_CONSUMED; #ifdef ICE_ADD_PROBES @@ -807,6 +869,7 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) return true; } +#endif /* CONFIG_ICE_USE_SKB */ /** * ice_alloc_rx_bufs - Replace used receive buffers * @rx_ring: ring to place buffers on @@ -836,6 +899,12 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) bi = &rx_ring->rx_buf[ntu]; do { +#ifdef CONFIG_ICE_USE_SKB + if (!ice_alloc_mapped_skb(rx_ring, bi)) + break; + + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); +#else /* CONFIG_ICE_USE_SKB */ /* if we fail here, we have work remaining */ if (!ice_alloc_mapped_page(rx_ring, bi)) break; @@ -850,6 +919,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) * because each write-back erases this info. */ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); +#endif /* CONFIG_ICE_USE_SKB */ rx_desc++; bi++; ntu++; @@ -871,6 +941,82 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) return !!cleaned_count; } +/** + * ice_inc_ntc: Advance the next_to_clean index + * @rx_ring: Rx ring + **/ +static void ice_inc_ntc(struct ice_ring *rx_ring) +{ + u16 ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + prefetch(ICE_RX_DESC(rx_ring, ntc)); +} + +static struct ice_rx_buf *ice_rx_buf(struct ice_ring *rx_ring, u32 idx) +{ + return &rx_ring->rx_buf[idx]; +} + +#ifdef CONFIG_ICE_USE_SKB +/** + * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use + * @rx_ring: rx descriptor ring to transact packets on + * @skb: skb to be used + * @size: size of buffer to add to skb + * @rx_buf_pgcnt: rx_buf page refcount - unused + * + * This function will pull an Rx buffer from the ring and synchronize it + * for use by the CPU. + * + * ONE-BUFF version + */ +static struct ice_rx_buf * +ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, + const unsigned int size, __always_unused int *rx_buf_pgcnt) +{ + struct ice_rx_buf *rx_buf; + + rx_buf = ice_rx_buf(rx_ring, rx_ring->next_to_clean); + *skb = rx_buf->skb; + + /* we are reusing so sync this buffer for CPU use */ + dma_unmap_single(rx_ring->dev, rx_buf->dma, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + prefetch(rx_buf->skb->data); + + return rx_buf; +} + +/** + * ice_put_rx_buf - Clean up used buffer and either recycle or free + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buf: rx buffer to pull data from + * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect() - unused + * + * This function will clean up the contents of the rx_buf. It will + * either recycle the buffer or unmap it and free the associated resources. + * + * ONE-BUFF version + */ +static void +ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, + __always_unused int rx_buf_pgcnt) +{ + ice_inc_ntc(rx_ring); + + if (!rx_buf) + return; + + /* TODO add skb recycling here? 
*/ + + /* clear contents of buffer_info */ + rx_buf->skb = NULL; +} + +#else /* CONFIG_ICE_USE_SKB */ /** * ice_page_is_reserved - check if reuse is possible * @page: page struct to check @@ -905,13 +1051,15 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) /** * ice_can_reuse_rx_page - Determine if page can be reused for another Rx * @rx_buf: buffer containing the page + * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call * * If page is reusable, we have a green light for calling ice_reuse_rx_page, * which will assign the current buffer to the buffer that next_to_alloc is * pointing to; otherwise, the DMA mapping needs to be destroyed and * page freed */ -static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) +static bool +ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt) { unsigned int pagecnt_bias = rx_buf->pagecnt_bias; struct page *page = rx_buf->page; @@ -922,7 +1070,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely((page_count(page) - pagecnt_bias) > 1)) + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) return false; #else #define ICE_LAST_OFFSET \ @@ -1014,17 +1162,24 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) * @rx_ring: Rx descriptor ring to transact packets on * @skb: skb to be used * @size: size of buffer to add to skb + * @rx_buf_pgcnt: rx_buf page refcount * * This function will pull an Rx buffer from the ring and synchronize it * for use by the CPU. */ static struct ice_rx_buf * ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, - const unsigned int size) + const unsigned int size, int *rx_buf_pgcnt) { struct ice_rx_buf *rx_buf; - rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; + rx_buf = ice_rx_buf(rx_ring, rx_ring->next_to_clean); + *rx_buf_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buf->page); +#else + 0; +#endif prefetchw(rx_buf->page); *skb = rx_buf->skb; @@ -1164,23 +1319,22 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, * ice_put_rx_buf - Clean up used buffer and either recycle or free * @rx_ring: Rx descriptor ring to transact packets on * @rx_buf: Rx buffer to pull data from + * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect() * * This function will update next_to_clean and then clean up the contents * of the rx_buf. It will either recycle the buffer or unmap it and free * the associated resources. */ -static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) +static void +ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, + int rx_buf_pgcnt) { - u16 ntc = rx_ring->next_to_clean + 1; - - /* fetch, update, and store next to clean */ - ntc = (ntc < rx_ring->count) ? 
ntc : 0; - rx_ring->next_to_clean = ntc; + ice_inc_ntc(rx_ring); if (!rx_buf) return; - if (ice_can_reuse_rx_page(rx_buf)) { + if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) { /* hand second half of page back to the ring */ ice_reuse_rx_page(rx_ring, rx_buf); #ifdef ICE_ADD_PROBES @@ -1204,6 +1358,8 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) rx_buf->skb = NULL; } +#endif /* CONFIG_ICE_USE_SKB */ + /** * ice_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed @@ -1396,20 +1552,10 @@ ice_rx_queue_override(struct sk_buff *skb, struct ice_ring *rx_ring, if (flags & ICE_RX_FLEXI_FLAGS_ACK) return; - - /* proceed only when filter type for channel is of type dest - * port or src+dest port or tunnel - */ - if (!(ch->fltr_type == ICE_CHNL_FLTR_TYPE_DEST_PORT || - ch->fltr_type == ICE_CHNL_FLTR_TYPE_SRC_DEST_PORT || - ch->fltr_type == ICE_CHNL_FLTR_TYPE_TENANT_ID)) - return; - /* make sure channel VSI is FD capable and enabled for * inline flow-director usage */ - if (!ice_vsi_fd_ena(ch->ch_vsi) || - !ice_vsi_inline_fd_ena(ch->ch_vsi)) + if (!ice_vsi_fd_ena(ch->ch_vsi) || !ch->inline_fd) return; /* Detection logic to check if HW table is about to get full, @@ -1472,12 +1618,15 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_pkts = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); #ifdef HAVE_XDP_SUPPORT - unsigned int xdp_res, xdp_xmit = 0; struct bpf_prog *xdp_prog = NULL; + unsigned int xdp_xmit = 0; #endif /* HAVE_XDP_SUPPORT */ +#ifndef CONFIG_ICE_USE_SKB struct xdp_buff xdp; +#endif bool failure; +#ifndef CONFIG_ICE_USE_SKB #ifdef HAVE_XDP_SUPPORT #ifdef HAVE_XDP_BUFF_RXQ xdp.rxq = &rx_ring->xdp_rxq; @@ -1489,14 +1638,21 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0); #endif #endif /* HAVE_XDP_BUFF_FRAME_SZ */ +#endif /* !CONFIG_ICE_USE_SKB */ /* start the loop to process Rx packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; struct ice_rx_buf *rx_buf; struct sk_buff *skb; +#ifndef CONFIG_ICE_USE_SKB +#ifdef HAVE_XDP_SUPPORT + unsigned int xdp_res; +#endif /* HAVE_XDP_SUPPORT */ +#endif /* CONFIG_ICE_USE_SKB */ unsigned int size; u16 stat_err_bits; + int rx_buf_pgcnt; u16 vlan_tag = 0; u16 rx_ptype; @@ -1523,9 +1679,9 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) struct ice_vsi *ctrl_vsi = rx_ring->vsi; if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && - ctrl_vsi->vf_id != ICE_INVAL_VFID) + ctrl_vsi->vf) ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); - ice_put_rx_buf(rx_ring, NULL); + ice_put_rx_buf(rx_ring, NULL, 0); cleaned_count++; continue; } @@ -1534,8 +1690,9 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ICE_RX_FLX_DESC_PKT_LEN_M; /* retrieve a buffer from the ring */ - rx_buf = ice_get_rx_buf(rx_ring, &skb, size); + rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt); +#ifndef CONFIG_ICE_USE_SKB if (!size) { xdp.data = NULL; xdp.data_end = NULL; @@ -1595,10 +1752,14 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) total_rx_pkts++; cleaned_count++; - ice_put_rx_buf(rx_ring, rx_buf); + ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); continue; #endif /* HAVE_XDP_SUPPORT */ construct_skb: +#endif /* !CONFIG_ICE_USE_SKB */ +#ifdef CONFIG_ICE_USE_SKB + __skb_put(skb, size); +#else /* CONFIG_ICE_USE_SKB */ if (skb) { ice_add_rx_frag(rx_ring, rx_buf, skb, size); } else if (likely(xdp.data)) { 
@@ -1614,8 +1775,9 @@ construct_skb: rx_buf->pagecnt_bias++; break; } +#endif /* CONFIG_ICE_USE_SKB */ - ice_put_rx_buf(rx_ring, rx_buf); + ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); cleaned_count++; /* skip if it is NOP desc */ @@ -1698,7 +1860,7 @@ static void __ice_update_sample(struct ice_q_vector *q_vector, /* if dim settings get stale, like when not updated for 1 * second or longer, force it to start again. This addresses the - * freqent case of an idle queue being switched to by the + * frequent case of an idle queue being switched to by the * scheduler. The 1,000 here means 1,000 milliseconds. */ if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000) @@ -1769,29 +1931,72 @@ static void ice_enable_interrupt(struct ice_q_vector *q_vector) bool wb_en = q_vector->wb_on_itr; u32 itr_val; - if (test_bit(ICE_DOWN, vsi->state)) + if (test_bit(ICE_VSI_DOWN, vsi->state)) return; - /* When exiting WB_ON_ITR, let ITR resume its normal - * interrupts-enabled path. + /* trigger an ITR delayed software interrupt when exiting busy poll, to + * make sure to catch any pending cleanups that might have been missed + * due to interrupt state transition. If busy poll or poll isn't + * enabled, then don't update ITR, and just enable the interrupt. */ - if (wb_en) + if (!wb_en) { + itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); + } else { q_vector->wb_on_itr = false; - itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); - /* trigger an immediate software interrupt when exiting - * busy poll, to make sure to catch any pending cleanups - * that might have been missed due to interrupt state - * transition. - */ - if (wb_en) { + /* do two things here with a single write. Set up the third ITR + * index to be used for software interrupt moderation, and then + * trigger a software interrupt with a rate limit of 20K on + * software interrupts, this will help avoid high interrupt + * loads due to frequently polling and exiting polling. + */ + itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K); itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M | - GLINT_DYN_CTL_SW_ITR_INDX_M | + ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S | GLINT_DYN_CTL_SW_ITR_INDX_ENA_M; } wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); } +/** + * ice_force_wb - trigger force write-back by setting WB_ON_ITR bit + * @hw: ptr to HW + * @q_vector: pointer to q_vector + * + * This function is used to force write-backs by setting WB_ON_ITR bit + * in DYN_CTLN register. WB_ON_ITR and INTENA are mutually exclusive bits. 
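/* Illustrative sketch (structure and names are made up): the one-buffer
 * skb receive pattern that the CONFIG_ICE_USE_SKB path above follows. The
 * skb is allocated and DMA-mapped at refill time; on completion the buffer
 * is unmapped, the packet length is committed with __skb_put(), and the
 * skb goes to the stack via GRO. Ring and descriptor handling is omitted.
 */
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
};

static bool example_rx_refill(struct napi_struct *napi, struct device *dev,
			      struct example_rx_buf *buf, unsigned int buf_len)
{
	buf->skb = napi_alloc_skb(napi, buf_len);
	if (!buf->skb)
		return false;

	buf->dma = dma_map_single(dev, buf->skb->data, buf_len,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, buf->dma)) {
		dev_kfree_skb_any(buf->skb);
		buf->skb = NULL;
		return false;
	}
	return true;
}

static void example_rx_complete(struct napi_struct *napi, struct device *dev,
				struct example_rx_buf *buf,
				unsigned int buf_len, unsigned int pkt_len,
				struct net_device *netdev)
{
	dma_unmap_single(dev, buf->dma, buf_len, DMA_FROM_DEVICE);
	__skb_put(buf->skb, pkt_len);	/* bytes the hardware wrote */
	buf->skb->protocol = eth_type_trans(buf->skb, netdev);
	napi_gro_receive(napi, buf->skb);
	buf->skb = NULL;
}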
+ * Setting WB_ON_ITR bits means Tx and Rx descriptors are written back based + * on ITR expiration irrespective of INTENA setting + */ +static void ice_force_wb(struct ice_hw *hw, struct ice_q_vector *q_vector) +{ + if (q_vector->num_ring_rx || q_vector->num_ring_tx) { +#ifdef ADQ_PERF_COUNTERS + q_vector->ch_stats.num_wb_on_itr_set++; +#endif /* ADQ_PERF_COUNTERS */ + wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), + ICE_GLINT_DYN_CTL_WB_ON_ITR(0, ICE_RX_ITR)); + } + + /* needed to avoid triggering WB_ON_ITR again which typically + * happens from ice_set_wb_on_itr function + */ + q_vector->wb_on_itr = true; +} + +/** + * ice_vector_intr_busypoll + * @qv: pointer to q_vector + * + * Returns: true if vector is transitioning from INTERRUPT + * to BUSY_POLL based on current and previous state of vector + */ +static bool ice_vector_intr_busypoll(struct ice_q_vector *qv) +{ + return !(qv->state_flags & ICE_CHNL_PREV_IN_BP) && + (qv->state_flags & ICE_CHNL_IN_BP); +} + /** * ice_refresh_bp_state - refresh state machine * @napi: ptr to NAPI struct @@ -1852,7 +2057,8 @@ state_update: #ifdef HAVE_NAPI_STATE_IN_BUSY_POLL /* update current state of vector */ - if (test_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state)) + if (test_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state) || + ice_vector_ind_poller(q_vector)) q_vector->state_flags |= ICE_CHNL_IN_BP; else q_vector->state_flags &= ~ICE_CHNL_IN_BP; @@ -1862,7 +2068,7 @@ state_update: skip_state_update: if (q_vector->state_flags & ICE_CHNL_IN_BP) { q_vector->jiffy = jiffies; - /* triffer force_wb by setting WB_ON_ITR only when + /* trigger force_wb by setting WB_ON_ITR only when * - vector is transitioning from INTR->BUSY_POLL * - once_in_bp is false, this is to prevent from doing it * every time whenever vector state is changing from @@ -1910,7 +2116,6 @@ ice_handle_chnl_vector(struct ice_q_vector *q_vector, bool unlikely_cb_bp) struct ice_vsi *ch_vsi = q_vector->ch->ch_vsi; struct ice_vsi *vsi = q_vector->vsi; - /* caller of this function deteremines next occurrence/execution context * of napi_poll (means next time whether napi_poll will be invoked from * busy_poll or SOFT IRQ context). Please refer to the caller of this @@ -1960,10 +2165,27 @@ ice_handle_chnl_vector(struct ice_q_vector *q_vector, bool unlikely_cb_bp) stats->once_bp_false++; #endif /* ADQ_PERF_COUNTERS */ ice_enable_interrupt(q_vector); + } else if (ice_vector_ind_poller(q_vector) && + !q_vector->last_wd_jiffy) { + q_vector->state_flags &= ~ICE_CHNL_IN_BP; + q_vector->state_flags &= ~ICE_CHNL_ONCE_IN_BP; + ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector); } } #ifdef HAVE_NAPI_STATE_IN_BUSY_POLL +/** + * ice_vector_ever_in_busypoll - check entry to busy poll + * @qv: pointer to q_vector + * + * Returns: true if vector state is currently OR previously BUSY_POLL + */ +static bool ice_vector_ever_in_busypoll(struct ice_q_vector *qv) +{ + return (qv->state_flags & ICE_CHNL_PREV_IN_BP) || + (qv->state_flags & ICE_CHNL_IN_BP); +} + /** * ice_chnl_vector_bypass_clean_complete * @napi: ptr to napi @@ -2156,12 +2378,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ ice_for_each_ring(ring, q_vector->tx) { + bool wd; + #ifdef HAVE_AF_XDP_ZC_SUPPORT - bool wd = ring->xsk_pool ? + wd = ring->xsk_pool ? 
ice_clean_tx_irq_zc(ring) : ice_clean_tx_irq(ring, budget); #else - bool wd = ice_clean_tx_irq(ring, budget); + wd = ice_clean_tx_irq(ring, budget); #endif /* HAVE_AF_XDP_ZC_SUPPORT */ #ifdef ICE_ADD_PROBES @@ -2249,12 +2473,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget) } /* end for ice_for_each_ring */ #ifdef HAVE_NAPI_STATE_IN_BUSY_POLL - if (ch_enabled && + if (ch_enabled && !ice_vector_ind_poller(q_vector) && (!test_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state))) { if (ice_chnl_vector_bypass_clean_complete(napi, budget, work_done)) goto bypass; } + if (ice_vector_ind_poller(q_vector) && work_done) + q_vector->last_wd_jiffy = get_jiffies_64(); #endif /* HAVE_NAPI_STATE_IN_BUSY_POLL */ /* If work not completed, return budget and polling will return */ @@ -2267,6 +2493,12 @@ int ice_napi_poll(struct napi_struct *napi, int budget) return budget; } + if (ice_vector_ind_poller(q_vector)) { + if (time_is_after_jiffies64(q_vector->last_wd_jiffy + + q_vector->ch->poller_timeout + 1)) + return budget; + q_vector->last_wd_jiffy = 0; + } bypass: /* reset the counter if code flow reached here because this function * determined that it is not going to return budget and will @@ -2364,7 +2596,7 @@ bypass: */ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) { - netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); + netif_tx_stop_queue(txring_txq(tx_ring)); /* Memory barrier before checking head and tail */ smp_mb(); @@ -2372,8 +2604,8 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) if (likely(ICE_DESC_UNUSED(tx_ring) < size)) return -EBUSY; - /* A reprieve! - use start_subqueue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_tx_start_queue(txring_txq(tx_ring)); ++tx_ring->tx_stats.restart_q; return 0; } @@ -2415,6 +2647,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, struct sk_buff *skb; skb_frag_t *frag; dma_addr_t dma; + bool kick; td_tag = off->td_l2tag1; td_cmd = off->td_cmd; @@ -2424,7 +2657,6 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, data_len = skb->data_len; size = skb_headlen(skb); - tx_desc = ICE_TX_DESC(tx_ring, i); if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { @@ -2497,9 +2729,6 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, tx_buf = &tx_ring->tx_buf[i]; } - /* record bytecount for BQL */ - netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); - /* record SW timestamp if HW timestamp is not available */ skb_tx_timestamp(first->skb); @@ -2527,9 +2756,10 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, ice_maybe_stop_tx(tx_ring, DESC_NEEDED); /* notify HW of packet */ -#ifdef HAVE_SKB_XMIT_MORE - if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { -#endif /* HAVE_SKB_XMIT_MORE */ + kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount, + netdev_xmit_more()); + if (kick) { + /* notify HW of packet */ writel_relaxed(i, tx_ring->tail); #ifndef SPIN_UNLOCK_IMPLIES_MMIOWB @@ -2538,9 +2768,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, */ mmiowb(); #endif /* SPIN_UNLOCK_IMPLIES_MMIOWB */ -#ifdef HAVE_SKB_XMIT_MORE } -#endif /* HAVE_SKB_XMIT_MORE */ return; @@ -2559,7 +2787,6 @@ dma_error: tx_ring->next_to_use = i; } - /** * ice_tx_csum - Enable Tx checksum offloads * @first: pointer to the first descriptor @@ -2570,9 +2797,9 @@ dma_error: static int 
ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) { -#ifdef ICE_ADD_PROBES +#if defined(ICE_ADD_PROBES) struct ice_ring *tx_ring = off->tx_ring; -#endif +#endif /* ICE_ADD_PROBES */ u32 l4_len = 0, l3_len = 0, l2_len = 0; struct sk_buff *skb = first->skb; union { @@ -3208,8 +3435,7 @@ static void ice_chnl_inline_fd(struct ice_ring *tx_ring, struct sk_buff *skb, /* make sure channel VSI is FD capable and enabled for * inline flow-director usage */ - if (!ice_vsi_fd_ena(ch->ch_vsi) || - !ice_vsi_inline_fd_ena(ch->ch_vsi)) + if (!ice_vsi_fd_ena(ch->ch_vsi) || !ch->inline_fd) return; /* snag network header to get L4 type and address */ @@ -3303,17 +3529,6 @@ static void ice_chnl_inline_fd(struct ice_ring *tx_ring, struct sk_buff *skb, vsi_num = ch->ch_vsi->vsi_num; if (th->syn && th->ack) { - /* server side connection establishment, hence SYN+ACK. - * proceed only when filter type for channel is of type dest - * port or src+dest port. This is to handle server (target) - * side use case where server side filter is either - * based on dest port or src+dest port - */ - if (!(ch->fltr_type == ICE_CHNL_FLTR_TYPE_DEST_PORT || - ch->fltr_type == ICE_CHNL_FLTR_TYPE_SRC_DEST_PORT || - ch->fltr_type == ICE_CHNL_FLTR_TYPE_TENANT_ID)) - return; - if (atomic_dec_if_positive(&qv->inline_fd_cnt) < 0) { /* bailout */ #ifdef ADQ_PERF_COUNTERS @@ -3327,19 +3542,6 @@ static void ice_chnl_inline_fd(struct ice_ring *tx_ring, struct sk_buff *skb, } else if (th->syn) { #ifdef ADQ_PERF_COUNTERS struct ice_ring *ch_tx_ring; -#endif /* ADQ_PERF_COUNTERS */ - /* client side doing active connect, hence SYN. - * proceed only when filter type for channel is of type src - * port or src+dest port. This is to handle client (initiator) - * side, where filter type would be either based on - * src port or src+dest port. - */ - if (!(ch->fltr_type == ICE_CHNL_FLTR_TYPE_SRC_PORT || - ch->fltr_type == ICE_CHNL_FLTR_TYPE_TENANT_ID || - ch->fltr_type == ICE_CHNL_FLTR_TYPE_SRC_DEST_PORT)) - return; - -#ifdef ADQ_PERF_COUNTERS ch_tx_ring = qv->vsi->tx_rings[q_index + ch->base_q]; if (ch_tx_ring) ch_tx_ring->ch_q_stats.tx.num_atr_setup++; @@ -3349,23 +3551,14 @@ static void ice_chnl_inline_fd(struct ice_ring *tx_ring, struct sk_buff *skb, tx_ring->ch_q_stats.tx.num_atr_evict++; #endif /* ADQ_PERF_COUNTERS */ } else { - /* This case is due to skb-mark, no need to check again, - * It is handled previously - */ - - /* filter type must be valid, SO_MARK based FD programming - * is agnostic to client/server type connection, hence - * not checking specific type of filter - */ - if (ch->fltr_type == ICE_CHNL_FLTR_TYPE_INVALID || - ch->fltr_type == ICE_CHNL_FLTR_TYPE_LAST) - return; #ifdef ADQ_PERF_COUNTERS + { struct ice_ring *ch_tx_ring; ch_tx_ring = qv->vsi->tx_rings[q_index + ch->base_q]; if (ch_tx_ring) ch_tx_ring->ch_q_stats.tx.num_mark_atr_setup++; + } #endif /* ADQ_PERF_COUNTERS */ } @@ -3378,7 +3571,6 @@ static void ice_chnl_inline_fd(struct ice_ring *tx_ring, struct sk_buff *skb, ice_set_dflt_val_fd_desc(&fd_ctx); - /* set report completion to NONE, means flow-director programming * status won't be informed to SW. 
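/* The SYN+ACK path above rate-limits inline flow-director programming with
 * atomic_dec_if_positive(); a minimal sketch of that credit pattern with
 * illustrative names, not tied to the driver's data structures.
 */
#include <linux/atomic.h>
#include <linux/types.h>

/* atomic_dec_if_positive() only stores the decremented value when it stays
 * at or above zero and returns the would-be result, so a negative return
 * means the budget is exhausted.
 */
static bool example_take_fd_credit(atomic_t *credits)
{
	return atomic_dec_if_positive(credits) >= 0;
}

/* Refill, e.g. from a periodic service task. */
static void example_refill_fd_credits(atomic_t *credits, int budget)
{
	atomic_set(credits, budget);
}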
*/ @@ -3400,46 +3592,40 @@ static void ice_chnl_inline_fd(struct ice_ring *tx_ring, struct sk_buff *skb, } /** - * ice_tsyn - set up the tsyn context descriptor - * @tx_ring: ptr to the ring to send - * @skb: ptr to the skb we're sending + * ice_tstamp - set up context descriptor for hardware timestamp + * @tx_ring: pointer to the Tx ring to send buffer on + * @skb: pointer to the SKB we're sending * @first: Tx buffer - * @off: Quad Word 1 - * @ptp_idx: ptp index to be filled in - * - * Returns NETDEV_TX_BUSY if not index avail, else OK + * @off: Tx offload parameters */ -static netdev_tx_t -ice_tsyn(struct ice_ring *tx_ring, struct sk_buff *skb, - struct ice_tx_buf *first, - struct ice_tx_offload_params *off, int *ptp_idx) +static void +ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb, + struct ice_tx_buf *first, struct ice_tx_offload_params *off) { - struct ice_vsi *vsi = tx_ring->vsi; - int idx; + s8 idx; - if (!vsi->ptp_tx) - return NETDEV_TX_BUSY; + /* only timestamp the outbound packet if the user has requested it */ + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + return; + + if (!tx_ring->ptp_tx) + return; /* Tx timestamps cannot be sampled when doing TSO */ if (first->tx_flags & ICE_TX_FLAGS_TSO) - return NETDEV_TX_BUSY; + return; - idx = ice_ptp_get_ts_idx(vsi); - if (idx >= 0) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - vsi->ptp_tx_skb[idx] = skb_get(skb); - *ptp_idx = idx; - } else { - vsi->tx_hwtstamp_skipped++; - return NETDEV_TX_BUSY; + /* Grab an open timestamp slot */ + idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); + if (idx < 0) { + tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++; + return; } off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) | ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S)); first->tx_flags |= ICE_TX_FLAGS_TSYN; - - return NETDEV_TX_OK; } /** @@ -3457,9 +3643,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) struct ice_tx_buf *first; struct ethhdr *eth; unsigned int count; - bool tsyn = true; int tso, csum; - int idx = -1; ice_trace(xmit_frame_ring, tx_ring, skb); @@ -3483,6 +3667,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) return NETDEV_TX_BUSY; } + /* prefetch for bql data which is infrequently used */ + netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring)); + offload.tx_ring = tx_ring; /* record the location of the first descriptor for this packet */ @@ -3523,17 +3710,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S); - /* only timestamp the outbound packet if the user has requested it */ -#ifdef SKB_SHARED_TX_IS_UNION - if (likely(!(skb_tx(skb)->hardware))) -#else - if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) -#endif /* SKB_SHARED_TX_IS_UNION */ - tsyn = false; + ice_tstamp(tx_ring, skb, first, &offload); - if (tsyn && - ice_tsyn(tx_ring, skb, first, &offload, &idx) == NETDEV_TX_BUSY) - goto out_ptp_drop; #if IS_ENABLED(CONFIG_NET_DEVLINK) if (ice_is_switchdev_running(vsi->back)) ice_eswitch_set_target_vsi(skb, &offload); @@ -3554,12 +3732,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) cdesc->qw1 = cpu_to_le64(offload.cd_qw1); } - if (ice_ring_ch_enabled(tx_ring)) ice_chnl_inline_fd(tx_ring, skb, first->tx_flags); - first->ptp_ts_idx = idx; - ice_tx_map(tx_ring, first, &offload); return NETDEV_TX_OK; @@ -3567,8 +3742,6 @@ out_drop: ice_trace(xmit_frame_ring_drop, tx_ring, skb); dev_kfree_skb_any(skb); return 
NETDEV_TX_OK; -out_ptp_drop: - return NETDEV_TX_BUSY; } /** @@ -3595,6 +3768,70 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) return ice_xmit_frame_ring(skb, tx_ring); } +/** + * ice_get_dscp_up - return the UP/TC value for a SKB + * @dcbcfg: DCB config that contains DSCP to UP/TC mapping + * @skb: SKB to query for info to determine UP/TC + * + * This function is to only be called when the PF is in L3 DSCP PFC mode + */ +static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb) +{ + u8 dscp = 0; + + if (skb->protocol == htons(ETH_P_IP)) + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + else if (skb->protocol == htons(ETH_P_IPV6)) + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + + return dcbcfg->dscp_map[dscp]; +} + +#ifndef HAVE_NDO_SELECT_QUEUE_SB_DEV +#if defined(HAVE_NDO_SELECT_QUEUE_ACCEL) || defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +#ifndef HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +u16 +ice_select_queue(struct net_device *netdev, struct sk_buff *skb, + void __always_unused *accel_priv, + select_queue_fallback_t fallback) +#else /* HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED */ +u16 +ice_select_queue(struct net_device *netdev, struct sk_buff *skb, + void __always_unused *accel_priv); +#endif /* HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED */ +#else /* HAVE_NDO_SELECT_QUEUE_ACCEL || HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */ +u16 ice_select_queue(struct net_device *netdev, struct sk_buff *skb) +#endif /*HAVE_NDO_SELECT_QUEUE_ACCEL || HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */ +#else /* HAVE_NDO_SELECT_QUEUE_SB_DEV */ +#ifdef HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +u16 +ice_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +#else /* HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED */ +u16 +ice_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev, select_queue_fallback_t fallback) +#endif /* HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED */ +#endif /* HAVE_NDO_SELECT_QUEUE_SB_DEV */ +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_dcbx_cfg *dcbcfg; + + dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) + skb->priority = ice_get_dscp_up(dcbcfg, skb); + +#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) + return netdev_pick_tx(netdev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) + return fallback(netdev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) + return fallback(netdev, skb); +#else + return __netdev_pick_tx(netdev, skb); +#endif +} + /** * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue * @tx_ring: tx_ring to clean diff --git a/drivers/thirdparty/ice/ice_txrx.h b/drivers/thirdparty/ice/ice_txrx.h index 054982dbf1c3..a50051d734c4 100644 --- a/drivers/thirdparty/ice/ice_txrx.h +++ b/drivers/thirdparty/ice/ice_txrx.h @@ -154,7 +154,6 @@ struct ice_tx_buf { u32 tx_flags; DEFINE_DMA_UNMAP_LEN(len); DEFINE_DMA_UNMAP_ADDR(dma); - int ptp_ts_idx; }; struct ice_tx_offload_params { @@ -329,7 +328,6 @@ struct ice_xdp_stats { /* descriptor ring, associated with a VSI */ struct ice_ring { - /* CL1 - 1st cacheline starts here */ struct ice_ring *next; /* pointer to next ring in q_vector */ void *desc; /* Descriptor ring memory */ struct device *dev; /* Used for DMA mapping */ @@ -341,10 +339,19 @@ struct ice_ring { struct ice_tx_buf *tx_buf; struct ice_rx_buf *rx_buf; }; - /* CL2 - 2nd cacheline starts here */ + /* --- cacheline 1 boundary (64 bytes) --- */ u16 q_index; /* Queue number of 
ring */ u16 q_handle; /* Queue handle per TC */ +#ifdef HAVE_XDP_SUPPORT +#define ICE_TX_FLAGS_RING_XDP BIT(0) +#endif /* HAVE_XDP_SUPPORT */ +#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) +#define ICE_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(2) +#define ICE_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(3) +#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(6) + u8 flags; + u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ @@ -369,6 +376,7 @@ struct ice_ring { struct ice_rxq_stats rx_stats; }; + /* --- cacheline 2 boundary (128 bytes) was 8 bytes ago --- */ struct rcu_head rcu; /* to avoid race on free */ DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */ struct ice_channel *ch; @@ -382,36 +390,33 @@ struct ice_ring { struct xsk_buff_pool *xsk_pool; #else struct xdp_umem *xsk_pool; -#endif +#endif /* HAVE_NETDEV_BPF_XSK_POOL */ #ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL struct zero_copy_allocator zca; #endif #endif /* HAVE_AF_XDP_ZC_SUPPORT */ - /* CL3 - 3rd cacheline starts here */ #ifdef HAVE_XDP_BUFF_RXQ + /* --- cacheline 3 boundary (192 bytes) --- */ struct xdp_rxq_info xdp_rxq; #endif /* HAVE_XDP_BUFF_RXQ */ #endif /* HAVE_XDP_SUPPORT */ - /* CLX - the below items are only accessed infrequently and should be - * in their own cache line if possible - */ -#ifdef HAVE_XDP_SUPPORT -#define ICE_TX_FLAGS_RING_XDP BIT(0) -#endif /* HAVE_XDP_SUPPORT */ -#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) -#define ICE_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(2) -#define ICE_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(3) - u8 flags; + /* --- cacheline 4 boundary (256 bytes) --- */ dma_addr_t dma; /* physical address of ring */ unsigned int size; /* length of descriptor ring in bytes */ u32 txq_teid; /* Added Tx queue TEID */ - u16 rx_buf_len; - u8 rx_crc_strip_dis; - u8 dcb_tc; /* Traffic class of ring */ - u64 cached_systime; - u8 ptp_rx:1; u32 ch_inline_fd_cnt_index; + u8 dcb_tc; /* Traffic class of ring */ + u16 rx_buf_len; + u16 quanta_prof_id; + struct ice_ptp_tx *tx_tstamps; + u64 cached_phctime; + u8 ptp_rx:1; + u8 ptp_tx:1; + + /* cacheline - the below items are only accessed infrequently and + * should be in their own cache line if possible + */ #ifdef ADQ_PERF_COUNTERS struct ice_ch_q_stats ch_q_stats; #endif /* ADQ_PERF_COUNTERS */ @@ -452,9 +457,14 @@ struct ice_ring_container { /* this matches the maximum number of ITR bits, but in usec * values, so it is shifted left one bit (bit zero is ignored) */ - u16 itr_setting:13; - u16 itr_reserved:2; - u16 itr_mode:1; + union { + struct { + u16 itr_setting:13; + u16 itr_reserved:2; + u16 itr_mode:1; + }; + u16 itr_settings; + }; }; struct ice_coalesce_stored { @@ -480,16 +490,38 @@ static inline unsigned int ice_rx_pg_order(struct ice_ring *ring) #define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring)) - union ice_32b_rx_flex_desc; bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count); netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); +#ifndef HAVE_NDO_SELECT_QUEUE_SB_DEV +#if defined(HAVE_NDO_SELECT_QUEUE_ACCEL) || defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +#ifndef HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +u16 ice_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback); +#else /* HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED */ +u16 ice_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv); +#endif /* HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED */ +#else /* HAVE_NDO_SELECT_QUEUE_ACCEL || HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */ +u16 
ice_select_queue(struct net_device *dev, struct sk_buff *skb); +#endif /* HAVE_NDO_SELECT_QUEUE_ACCEL || HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */ +#else /* HAVE_NDO_SELECT_QUEUE_SB_DEV */ +#ifdef HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +u16 ice_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev); +#else /* HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED */ +u16 ice_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback); +#endif /* HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED */ +#endif /* HAVE_NDO_SELECT_QUEUE_SB_DEV */ +int ice_setup_tstamp_ring(struct ice_ring *tstamp_ring); +void ice_free_tx_ring(struct ice_ring *tx_ring); void ice_clean_tx_ring(struct ice_ring *tx_ring); void ice_clean_rx_ring(struct ice_ring *rx_ring); int ice_setup_tx_ring(struct ice_ring *tx_ring); int ice_setup_rx_ring(struct ice_ring *rx_ring); -void ice_free_tx_ring(struct ice_ring *tx_ring); void ice_free_rx_ring(struct ice_ring *rx_ring); int ice_napi_poll(struct napi_struct *napi, int budget); int diff --git a/drivers/thirdparty/ice/ice_txrx_lib.c b/drivers/thirdparty/ice/ice_txrx_lib.c index 261a8635e8b6..64ab7994a18d 100644 --- a/drivers/thirdparty/ice/ice_txrx_lib.c +++ b/drivers/thirdparty/ice/ice_txrx_lib.c @@ -3,6 +3,7 @@ #include "ice_txrx_lib.h" #include "ice_eswitch.h" +#include /** * ice_release_rx_desc - Store the new tail and head values @@ -147,7 +148,6 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, decoded = ice_decode_rx_desc_ptype(ptype); - /* Start with CHECKSUM_NONE and by default csum_level = 0 */ skb->ip_summed = CHECKSUM_NONE; skb_checksum_none_assert(skb); @@ -156,7 +156,6 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, if (!(ring->netdev->features & NETIF_F_RXCSUM)) return; - /* check if HW has decoded the packet and checksum */ if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) return; @@ -208,6 +207,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, case ICE_RX_PTYPE_INNER_PROT_UDP: case ICE_RX_PTYPE_INNER_PROT_SCTP: skb->ip_summed = CHECKSUM_UNNECESSARY; + break; default: break; } @@ -236,22 +236,17 @@ ice_process_skb_fields(struct ice_ring *rx_ring, ice_rx_hash(rx_ring, rx_desc, skb, ptype); /* modifies the skb - consumes the enet header */ -#if IS_ENABLED(CONFIG_NET_DEVLINK) - skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev - (rx_ring, rx_desc)); -#else skb->protocol = eth_type_trans(skb, rx_ring->netdev); -#endif /* CONFIG_NET_DEVLINK */ ice_rx_csum(rx_ring, skb, rx_desc, ptype); if (rx_ring->ptp_rx) ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); -#ifdef HAVE_NETDEV_SB_DEV +#ifdef HAVE_NDO_DFWD_OPS if (!netif_is_ice(rx_ring->netdev)) macvlan_count_rx((const struct macvlan_dev *)netdev_priv(rx_ring->netdev), skb->len + ETH_HLEN, true, false); -#endif /* HAVE_NETDEV_SB_DEV */ +#endif /* HAVE_NDO_DFWD_OPS */ } /** @@ -284,6 +279,13 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag); #endif /* ICE_ADD_PROBES */ + if (ice_ring_ch_enabled(rx_ring) && rx_ring->ch->qps_per_poller > 1) { + struct napi_struct *napi; + + napi = &rx_ring->vsi->q_vectors[rx_ring->q_index]->napi; + skb_mark_napi_id(skb, napi); + } + napi_gro_receive(&rx_ring->q_vector->napi, skb); } diff --git a/drivers/thirdparty/ice/ice_txrx_lib.h b/drivers/thirdparty/ice/ice_txrx_lib.h index 09a2e80525cd..5edb85142de5 100644 --- a/drivers/thirdparty/ice/ice_txrx_lib.h +++ 
b/drivers/thirdparty/ice/ice_txrx_lib.h @@ -32,7 +32,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) } /** - * ice_get_vlan_from_rx_desc - get VLAN from Rx flex descriptor + * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor * @rx_desc: Rx 32b flex descriptor with RXDID=2 * * The OS and current PF implementation only support stripping a single VLAN tag diff --git a/drivers/thirdparty/ice/ice_type.h b/drivers/thirdparty/ice/ice_type.h index eeb2de1a152e..889cf4b40c98 100644 --- a/drivers/thirdparty/ice/ice_type.h +++ b/drivers/thirdparty/ice/ice_type.h @@ -4,43 +4,27 @@ #ifndef _ICE_TYPE_H_ #define _ICE_TYPE_H_ - - - - - -#define ICE_BYTES_PER_WORD 2 -#define ICE_BYTES_PER_DWORD 4 -#define ICE_MAX_TRAFFIC_CLASS 8 -#define ICE_CHNL_MAX_TC 16 - - - - - -#include "ice_status.h" +#include "ice_defs.h" #include "ice_hw_autogen.h" #include "ice_devids.h" #include "ice_osdep.h" -#include "ice_controlq.h" #include "ice_lan_tx_rx.h" +#include "ice_ddp.h" +#include "ice_controlq.h" #include "ice_flex_type.h" #include "ice_protocol_type.h" #include "ice_sbq_cmd.h" #include "ice_vlan_mode.h" #include "ice_fwlog.h" - - static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { return test_bit(tc, &bitmap); } - static inline u64 round_up_64bit(u64 a, u32 b) { - return div64_long(((a) + (b) / 2), (b)); + return div64_u64(((a) + (b) / 2), (b)); } static inline u32 ice_round_to_num(u32 N, u32 R) @@ -58,6 +42,8 @@ static inline u32 ice_round_to_num(u32 N, u32 R) /* Data type manipulation macros. */ #define ICE_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF)) #define ICE_LO_WORD(x) ((u16)((x) & 0xFFFF)) +#define ICE_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF)) +#define ICE_LO_BYTE(x) ((u8)((x) & 0xFF)) /* debug masks - set these bits in hw->debug_mask to control output */ #define ICE_DBG_INIT BIT_ULL(1) @@ -88,6 +74,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R) ICE_DBG_AQ_DESC | \ ICE_DBG_AQ_DESC_BUF | \ ICE_DBG_AQ_CMD) +#define ICE_DBG_PARSER BIT_ULL(28) #define ICE_DBG_USER BIT_ULL(31) #define ICE_DBG_ALL 0xFFFFFFFFFFFFFFFFULL @@ -96,11 +83,6 @@ static inline u32 ice_round_to_num(u32 N, u32 R) #define __always_unused #endif - - - - - enum ice_aq_res_ids { ICE_NVM_RES_ID = 1, ICE_SPD_RES_ID, @@ -113,11 +95,6 @@ enum ice_aq_res_ids { #define ICE_CHANGE_LOCK_TIMEOUT 1000 #define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000 -enum ice_aq_res_access_type { - ICE_RES_READ = 1, - ICE_RES_WRITE -}; - struct ice_driver_ver { u8 major_ver; u8 minor_ver; @@ -145,7 +122,8 @@ enum ice_fec_mode { ICE_FEC_NONE = 0, ICE_FEC_RS, ICE_FEC_BASER, - ICE_FEC_AUTO + ICE_FEC_AUTO, + ICE_FEC_DIS_AUTO }; struct ice_phy_cache_mode_data { @@ -170,9 +148,9 @@ enum ice_mac_type { ICE_MAC_VF, ICE_MAC_E810, ICE_MAC_GENERIC, + ICE_MAC_GENERIC_3K, }; - /* Media Types */ enum ice_media_type { ICE_MEDIA_UNKNOWN = 0, @@ -193,6 +171,7 @@ enum ice_vsi_type { ICE_VSI_OFFLOAD_MACVLAN = 5, ICE_VSI_LB = 6, ICE_VSI_SWITCHDEV_CTRL = 7, + ICE_VSI_ADI = 8, }; struct ice_link_status { @@ -253,6 +232,15 @@ struct ice_phy_info { #define ICE_MAX_NUM_MIRROR_RULES 64 +#define ICE_L2TPV2_FLAGS_CTRL 0x8000 +#define ICE_L2TPV2_FLAGS_LEN 0x4000 +#define ICE_L2TPV2_FLAGS_SEQ 0x0800 +#define ICE_L2TPV2_FLAGS_OFF 0x0200 +#define ICE_L2TPV2_FLAGS_VER 0x0002 + +#define ICE_L2TPV2_PKT_LENGTH 6 +#define ICE_PPP_PKT_LENGTH 4 + /* protocol enumeration for filters */ enum ice_fltr_ptype { /* NONE - used for undef/error */ @@ -262,28 +250,37 @@ enum ice_fltr_ptype { ICE_FLTR_PTYPE_NONF_IPV4_SCTP, ICE_FLTR_PTYPE_NONF_IPV4_OTHER, 
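One behavioral note on the ice_type.h hunk above: round_up_64bit() now divides with div64_u64() instead of div64_long(), so a dividend above S64_MAX is no longer misinterpreted as a negative value. Despite the name, the (a + b/2) / b form rounds a/b to the nearest integer with ties rounding up, e.g. round_up_64bit(7, 2) = (7 + 1) / 2 = 4 and round_up_64bit(6, 4) = (6 + 2) / 4 = 2.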
ICE_FLTR_PTYPE_NONF_IPV4_GTPU, - ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH, - ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW, - ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP, - ICE_FLTR_PTYPE_NONF_IPV6_GTPU, - ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH, - ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH_DW, - ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH_UP, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_UDP, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_UDP, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_UDP, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_UDP, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GTPU, + ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH, + ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH_DW, + ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH_UP, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP, ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER, ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER, @@ -306,11 +303,159 @@ enum ice_fltr_ptype { ICE_FLTR_PTYPE_NONF_IPV4_UDP_ECPRI_TP0, ICE_FLTR_PTYPE_FRAG_IPV4, ICE_FLTR_PTYPE_FRAG_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GRE, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_IPV4, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_IPV4, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_IPV4_UDP, + 
ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_IPV6, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_IPV4, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_IPV4, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_IPV6, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_DW, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_DW_IPV4, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_DW_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_DW_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_DW_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_DW_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_DW_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_DW, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_DW_IPV4, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_DW_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_DW_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_DW_IPV6, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_DW_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_DW_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_TCP, + 
ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_UP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_UP_IPV4, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_UP_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_UP_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_UP_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_UP_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_GTPU_EH_UP_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_UP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_UP_IPV4, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_UP_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_UP_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_UP_IPV6, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_UP_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_GTPU_EH_UP_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP, ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_SCTP, ICE_FLTR_PTYPE_NONF_IPV6_OTHER, ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN, + ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP, + ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER, + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL, + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2, + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP, + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4, + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6, + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL, + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2, + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP, + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4, + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6, + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP, ICE_FLTR_PTYPE_MAX, }; @@ -402,6 +547,7 @@ struct ice_hw_common_caps { u8 ieee_1588; u8 mgmt_cem; u8 iwarp; + u8 roce_lag; /* WoL and APM support */ #define ICE_WOL_SUPPORT_M BIT(0) @@ -410,6 +556,13 @@ struct ice_hw_common_caps { u8 apm_wol_support; u8 acpi_prog_mthd; u8 proxy_support; +#define ICE_NVM_ADDRESS_VALUE_READS 3 + u16 nvm_word_address[ICE_NVM_ADDRESS_VALUE_READS]; + u16 nvm_value[ICE_NVM_ADDRESS_VALUE_READS]; + u32 orom_ver; + u32 base_release_ver_major; + u32 base_release_ver_type; + u32 base_release_ver_iana; bool nvm_update_pending_nvm; bool nvm_update_pending_orom; bool nvm_update_pending_netlist; @@ -422,6 +575,10 @@ struct ice_hw_common_caps { #define ICE_NVM_MGMT_SEC_REV_DISABLED BIT(0) #define ICE_NVM_MGMT_UPDATE_DISABLED BIT(1) #define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3) + /* PCIe reset avoidance */ + bool pcie_reset_avoidance; /* false: not supported, true: supported */ + /* Post update reset restriction */ + bool reset_restrict_support; /* false: not supported, true: supported */ /* External topology device images within the NVM */ #define ICE_EXT_TOPO_DEV_IMG_COUNT 4 @@ -435,6 +592,8 @@ struct ice_hw_common_caps { #define ICE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0) bool ext_topo_dev_img_prog_en[ICE_EXT_TOPO_DEV_IMG_COUNT]; 
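The new ICE_FLTR_PTYPE_NONF_*_L2TPV2* filter types above pair with the ICE_L2TPV2_FLAGS_* and ICE_L2TPV2_PKT_LENGTH / ICE_PPP_PKT_LENGTH definitions earlier in this header. As a rough sketch of how those masks map onto the first 16 bits of an L2TPv2 header (host byte order assumed; the helper names are illustrative, not part of the driver):

static inline bool example_l2tpv2_is_control(u16 flags_ver)
{
	/* T bit set marks a control message (the *_L2TPV2_CONTROL ptypes) */
	return !!(flags_ver & ICE_L2TPV2_FLAGS_CTRL);
}

static inline unsigned int example_l2tpv2_hdr_len(u16 flags_ver)
{
	/* base header: flags/version + tunnel ID + session ID */
	unsigned int len = ICE_L2TPV2_PKT_LENGTH;

	if (flags_ver & ICE_L2TPV2_FLAGS_LEN)
		len += 2;	/* optional Length field */
	if (flags_ver & ICE_L2TPV2_FLAGS_SEQ)
		len += 4;	/* Ns and Nr sequence numbers */
	if (flags_ver & ICE_L2TPV2_FLAGS_OFF)
		len += 2;	/* Offset Size field (pad not counted) */

	return len;		/* a PPP payload then adds ICE_PPP_PKT_LENGTH */
}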
#define ICE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1) + bool tx_sched_topo_comp_mode_en; + bool dyn_flattening_en; }; /* IEEE 1588 TIME_SYNC specific info */ @@ -463,16 +622,23 @@ enum ice_time_ref_freq { NUM_ICE_TIME_REF_FREQ }; +/* Clock source specification */ +enum ice_clk_src { + ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */ + ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */ + + NUM_ICE_CLK_SRC +}; + struct ice_ts_func_info { /* Function specific info */ enum ice_time_ref_freq time_ref; - u8 clk_freq; - u8 clk_src; - u8 tmr_index_assoc; - u8 ena; - u8 tmr_index_owned; - u8 src_tmr_owned; - u8 tmr_ena; + u8 clk_src : 1; + u8 tmr_index_assoc : 1; + u8 ena : 1; + u8 tmr_index_owned : 1; + u8 src_tmr_owned : 1; + u8 tmr_ena : 1; }; /* Device specific definitions */ @@ -484,18 +650,28 @@ struct ice_ts_func_info { #define ICE_TS_DEV_ENA_M BIT(24) #define ICE_TS_TMR0_ENA_M BIT(25) #define ICE_TS_TMR1_ENA_M BIT(26) +#define ICE_TS_LL_TX_TS_READ_M BIT(28) struct ice_ts_dev_info { /* Device specific info */ - u32 ena_ports; u32 tmr_own_map; - u32 tmr0_owner; - u32 tmr1_owner; - u8 tmr0_owned; - u8 tmr1_owned; - u8 ena; - u8 tmr0_ena; - u8 tmr1_ena; + u8 tmr0_owner; + u8 tmr1_owner; + u8 tmr0_owned : 1; + u8 tmr1_owned : 1; + u8 ena : 1; + u8 tmr0_ena : 1; + u8 tmr1_ena : 1; + u8 ts_ll_read : 1; +}; + +#define ICE_NAC_TOPO_PRIMARY_M BIT(0) +#define ICE_NAC_TOPO_DUAL_M BIT(1) +#define ICE_NAC_TOPO_ID_M ICE_M(0xf, 0) + +struct ice_nac_topology { + u32 mode; + u8 id; }; /* Function specific capabilities */ @@ -517,9 +693,9 @@ struct ice_hw_dev_caps { u32 num_flow_director_fltr; /* Number of FD filters available */ struct ice_ts_dev_info ts_dev_info; u32 num_funcs; + struct ice_nac_topology nac_topo; }; - /* Information about MAC such as address, etc... */ struct ice_mac_info { u8 lan_addr[ETH_ALEN]; @@ -924,10 +1100,6 @@ struct ice_port_info { #define ICE_SCHED_PORT_STATE_READY 0x1 u8 lport; #define ICE_LPORT_MASK 0xff - u16 dflt_tx_vsi_rule_id; - u16 dflt_tx_vsi_num; - u16 dflt_rx_vsi_rule_id; - u16 dflt_rx_vsi_num; struct ice_fc_info fc; struct ice_mac_info mac; struct ice_phy_info phy; @@ -949,7 +1121,6 @@ struct ice_switch_info { DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS); }; - /* Enum defining the different states of the mailbox snapshot in the * PF-VF mailbox overflow detection algorithm. 
The snapshot can be in * states: @@ -1024,6 +1195,13 @@ struct ice_mbx_data { u16 async_watermark_val; }; +/* PHY configuration */ +enum ice_phy_cfg { + ICE_PHY_E810 = 1, + ICE_PHY_E822, + ICE_PHY_ETH56G, +}; + /* Port hardware description */ struct ice_hw { u8 __iomem *hw_addr; @@ -1048,6 +1226,8 @@ struct ice_hw { u8 revision_id; u8 pf_id; /* device profile info */ + enum ice_phy_cfg phy_cfg; + u8 logical_pf_id; u16 max_burst_size; /* driver sets this value */ @@ -1113,23 +1293,23 @@ struct ice_hw { /* true if VSIs can share unicast MAC addr */ u8 umac_shared; -#define ICE_PHY_PER_NAC 1 -#define ICE_MAX_QUAD 2 -#define ICE_NUM_QUAD_TYPE 2 -#define ICE_PORTS_PER_QUAD 4 -#define ICE_PHY_0_LAST_QUAD 1 -#define ICE_PORTS_PER_PHY 8 -#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY - +#define ICE_PHY_PER_NAC_E822 1 +#define ICE_MAX_QUAD 2 +#define ICE_QUADS_PER_PHY_E822 2 +#define ICE_PORTS_PER_PHY_E822 8 +#define ICE_PORTS_PER_QUAD 4 +#define ICE_PORTS_PER_PHY_E810 4 +#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD) /* Active package version (currently active) */ struct ice_pkg_ver active_pkg_ver; + u32 pkg_seg_id; + u32 pkg_sign_type; u32 active_track_id; + u8 pkg_has_signing_seg:1; u8 active_pkg_name[ICE_PKG_NAME_SIZE]; u8 active_pkg_in_nvm; - enum ice_aq_err pkg_dwnld_status; - /* Driver's package ver - (from the Ice Metadata section) */ struct ice_pkg_ver pkg_ver; u8 pkg_name[ICE_PKG_NAME_SIZE]; @@ -1178,7 +1358,8 @@ struct ice_hw { struct ice_mbx_snapshot mbx_snapshot; DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX); u8 dvm_ena; - __le16 io_expander_handle; + u16 io_expander_handle; + u8 cgu_part_number; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ @@ -1270,6 +1451,7 @@ enum ice_sw_fwd_act_type { ICE_FWD_TO_Q, ICE_FWD_TO_QGRP, ICE_DROP_PACKET, + ICE_LG_ACTION, ICE_INVAL_ACT }; @@ -1345,17 +1527,13 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_SR_POR_REGISTERS_AUTOLOAD_PTR 0x118 /* CSS Header words */ +#define ICE_NVM_CSS_HDR_LEN_L 0x02 +#define ICE_NVM_CSS_HDR_LEN_H 0x03 #define ICE_NVM_CSS_SREV_L 0x14 #define ICE_NVM_CSS_SREV_H 0x15 -/* Length of CSS header section in words */ -#define ICE_CSS_HEADER_LENGTH 330 - -/* Offset of Shadow RAM copy in the NVM bank area. */ -#define ICE_NVM_SR_COPY_WORD_OFFSET roundup(ICE_CSS_HEADER_LENGTH, 32) - -/* Size in bytes of Option ROM trailer */ -#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH) +/* Length of Authentication header section in words */ +#define ICE_NVM_AUTH_HEADER_LEN 0x08 /* The Link Topology Netlist section is stored as a series of words. 
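With the fixed ICE_CSS_HEADER_LENGTH definitions removed above, the CSS header size is now read from the image itself via ICE_NVM_CSS_HDR_LEN_L/H and the authentication header is added on top. A rough sketch of that derivation, assuming the two length words hold a dword count as in the upstream ice NVM code (the helper name is illustrative):

static u32 example_css_hdr_len_words(u16 len_lo, u16 len_hi)
{
	u32 hdr_len_dwords = ((u32)len_hi << 16) | len_lo;

	/* dword count to 16-bit word count, plus the authentication header */
	return hdr_len_dwords * 2 + ICE_NVM_AUTH_HEADER_LEN;
}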
It is * stored in the NVM as a TLV, with the first two words containing the type @@ -1445,8 +1623,18 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_FW_API_REPORT_DFLT_CFG_MIN 7 #define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3 +/* FW version for FEC disable in Auto FEC mode */ +#define ICE_FW_FEC_DIS_AUTO_BRANCH 1 +#define ICE_FW_FEC_DIS_AUTO_MAJ 7 +#define ICE_FW_FEC_DIS_AUTO_MIN 0 +#define ICE_FW_FEC_DIS_AUTO_PATCH 5 + /* AQ API version for FW health reports */ #define ICE_FW_API_HEALTH_REPORT_MAJ 1 #define ICE_FW_API_HEALTH_REPORT_MIN 7 #define ICE_FW_API_HEALTH_REPORT_PATCH 6 + +/* AQ API version for FW auto drop reports */ +#define ICE_FW_API_AUTO_DROP_MAJ 1 +#define ICE_FW_API_AUTO_DROP_MIN 4 #endif /* _ICE_TYPE_H_ */ diff --git a/drivers/thirdparty/ice/ice_vdcm.c b/drivers/thirdparty/ice/ice_vdcm.c new file mode 100644 index 000000000000..50f6f0ae561c --- /dev/null +++ b/drivers/thirdparty/ice/ice_vdcm.c @@ -0,0 +1,1563 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#include "ice_vdcm.h" + +#define VFIO_PCI_OFFSET_SHIFT 40 +#define VFIO_PCI_OFFSET_TO_INDEX(off) ((off) >> VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_OFFSET_MASK (BIT_ULL(VFIO_PCI_OFFSET_SHIFT) - 1) +#define ICE_VDCM_BAR3_SIZE SZ_16K + +/* According to PCI Express Base Specification 4.0r1.0 section 7.5.1.2 + * Type 0 Configuration Space Header, the device specific capabilities + * start at offset 0x40. + */ +#define ICE_VDCM_MSIX_CTRL_OFFS (0x40 + PCI_MSIX_FLAGS) + +struct ice_vdcm_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + +static u64 ice_vdcm_pci_config[] = { + 0x001000000dd58086ULL, /* 0x00-0x40: PCI config header */ + 0x0000000002000000ULL, + 0x000000000000000cULL, + 0x0000000c00000000ULL, + 0x0000000000000000ULL, + 0x0000808600000000ULL, + 0x0000004000000000ULL, + 0x0000000000000000ULL, + 0x0000000300040011ULL, /* 0x40-0x4C: MSI-X capability */ + 0x0000000000002003ULL, + 0x0000000000920010ULL, + 0x0000000000000000ULL, + 0x0000000000000000ULL, + 0x0000000000000000ULL, + 0x0070001000000000ULL, + 0x0000000000000000ULL, + 0x0000000000000000ULL, + 0x0000000000000000ULL, +}; + +/** + * ice_vdcm_cfg_init - initialize VDCM PCI configuration space + * @ivdm: pointer to VDCM + * + * Return 0 for success, non 0 for failure. + */ +static int ice_vdcm_cfg_init(struct ice_vdcm *ivdm) +{ + int irq_count; + + irq_count = ivdm->adi->get_vector_num(ivdm->adi); + if (irq_count <= 0) + return -EINVAL; + + memcpy(ivdm->pci_cfg_space, ice_vdcm_pci_config, + sizeof(ice_vdcm_pci_config)); + + /* Set MSI-X table size using N-1 encoding */ + ivdm->pci_cfg_space[ICE_VDCM_MSIX_CTRL_OFFS] = irq_count - 1; + + return 0; +} + +/** + * ice_vdcm_create - create an emulated device + * @kobj: kernel object + * @mdev: emulated device instance pointer + * + * This function is called when VFIO consumer (like QEMU) wants to create a + * emulated device, typically by echo some uuid to the SYSFS. + * Return 0 for success, non 0 for failure. 
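Two conventions in the new ice_vdcm.c are easy to miss. The MSI-X write in ice_vdcm_cfg_init() stores irq_count - 1 because the PCI MSI-X Table Size field is N-1 encoded, so a device exposing 5 vectors reports 4, and instances themselves come into being through the usual mdev sysfs create node as the comment above notes. The VFIO_PCI_OFFSET_* macros split the mdev file offset into a region index in the upper bits and a byte offset in the low 40 bits; a small illustrative check, not part of the driver:

static void example_vfio_offset_encoding(void)
{
	/* BAR0 is VFIO region index 1, so its base file offset is 1ULL << 40 */
	u64 off = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX) + 0x1000;

	WARN_ON(VFIO_PCI_OFFSET_TO_INDEX(off) != VFIO_PCI_BAR0_REGION_INDEX);
	WARN_ON((off & VFIO_PCI_OFFSET_MASK) != 0x1000);	/* byte 0x1000 into BAR0 */
}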
+ */ +#ifdef HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE +static int ice_vdcm_create(struct kobject *kobj, struct mdev_device *mdev) +#else +static int ice_vdcm_create(struct mdev_device *mdev) +#endif +{ + struct device *parent_dev = mdev_parent_dev(mdev); + struct ice_vdcm *ivdm; + int err; + + ivdm = kzalloc(sizeof(*ivdm), GFP_KERNEL); + if (!ivdm) + return -ENOMEM; + + ivdm->adi = ice_vdcm_alloc_adi(parent_dev, ivdm); + if (!ivdm->adi) { + err = -ENOMEM; + goto alloc_adi_err; + } + + ivdm->irq_type = VFIO_PCI_NUM_IRQS; + ivdm->dev = mdev_dev(mdev); + ivdm->parent_dev = parent_dev; + mdev_set_drvdata(mdev, ivdm); + mutex_init(&ivdm->vma_lock); + INIT_LIST_HEAD(&ivdm->vma_list); + err = ice_vdcm_cfg_init(ivdm); + if (err) + goto vdcm_cfg_init_err; + +#ifdef HAVE_DEV_IN_MDEV_API + mdev_set_iommu_device(mdev_dev(mdev), parent_dev); +#else + mdev_set_iommu_device(mdev, parent_dev); +#endif + + return 0; + +vdcm_cfg_init_err: + ice_vdcm_free_adi(ivdm->adi); +alloc_adi_err: + kfree(ivdm); + + return err; +} + +/** + * ice_vdcm_remove - delete an emulated device + * @mdev: emulated device instance pointer + * + * This function is called when VFIO consumer(like QEMU) wants to delete + * emulated device. + * Return 0 for success, negative for failure. + */ +static int ice_vdcm_remove(struct mdev_device *mdev) +{ + struct ice_vdcm *ivdm = mdev_get_drvdata(mdev); + + ice_vdcm_free_adi(ivdm->adi); + ivdm->adi = NULL; + + kfree(ivdm); + ivdm = NULL; + + return 0; +} + +/** + * ice_vdcm_cfg_read - read PCI configuration space + * @ivdm: pointer to VDCM + * @pos: read offset + * @buf: buf stores read content + * @count: read length + * + * Return 0 for success, negative value for failure. + */ +static int +ice_vdcm_cfg_read(struct ice_vdcm *ivdm, unsigned int pos, + char *buf, unsigned int count) +{ + if (pos + count > ICE_VDCM_CFG_SIZE) + return -EINVAL; + + memcpy(buf, &ivdm->pci_cfg_space[pos], count); + return 0; +} + +/* Bitmap for writable bits (RW or RW1C bits, but cannot co-exist in one + * byte) byte by byte in standard PCI configuration space. (not the full + * 256 bytes.) + */ +static const u8 ice_vdcm_csr_rw_bmp[] = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0x07, 0x00, 0xf9, + 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, + 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xf0, 0xff, 0xff, 0xff, + 0xf0, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, +}; + +/** + * ice_vdcm_cfg_write_mask - write PCI configuration space with mask + * @ivdm: pointer to VDCM + * @off: write offset + * @buf: buf stores write content + * @bytes: write length + * + * Return 0 for success, negative value for failure. + */ +static int +ice_vdcm_cfg_write_mask(struct ice_vdcm *ivdm, unsigned int off, + u8 *buf, unsigned int bytes) +{ + u8 *cfg_base = ivdm->pci_cfg_space; + u8 mask, newval, oldval; + unsigned int i = 0; + + for (; i < bytes && (off + i < sizeof(ice_vdcm_csr_rw_bmp)); i++) { + mask = ice_vdcm_csr_rw_bmp[off + i]; + oldval = cfg_base[off + i]; + newval = buf[i] & mask; + + /* The PCI_STATUS high byte has RW1C bits, here + * emulates clear by writing 1 for these bits. + * Writing a 0b to RW1C bits has no effect. + */ + if (off + i == PCI_STATUS + 1) + newval = (~newval & oldval) & mask; + + cfg_base[off + i] = (oldval & ~mask) | newval; + } + + /* For other configuration space directly copy as it is. 
*/ + if (i < bytes) + memcpy(cfg_base + off + i, buf + i, bytes - i); + + return 0; +} + +/** + * ice_vdcm_cfg_write_bar - write PCI configuration space BAR registers + * @ivdm: pointer to VDCM + * @offset: write offset + * @buf: buf stores write content + * @bytes: write length + * + * Return 0 for success, negative value for failure. + */ +static int +ice_vdcm_cfg_write_bar(struct ice_vdcm *ivdm, unsigned int offset, + char *buf, unsigned int bytes) +{ + u32 val = *(u32 *)(buf); + int err; + + switch (offset) { + case PCI_BASE_ADDRESS_0: + val &= ~(ICE_VDCM_BAR0_SIZE - 1); + err = ice_vdcm_cfg_write_mask(ivdm, offset, (u8 *)&val, bytes); + break; + case PCI_BASE_ADDRESS_1: + val &= ~(u64)(ICE_VDCM_BAR0_SIZE - 1) >> 32; + err = ice_vdcm_cfg_write_mask(ivdm, offset, (u8 *)&val, bytes); + break; + case PCI_BASE_ADDRESS_3: + val &= ~(ICE_VDCM_BAR3_SIZE - 1); + err = ice_vdcm_cfg_write_mask(ivdm, offset, (u8 *)&val, bytes); + break; + case PCI_BASE_ADDRESS_4: + val &= ~(u64)(ICE_VDCM_BAR3_SIZE - 1) >> 32; + err = ice_vdcm_cfg_write_mask(ivdm, offset, (u8 *)&val, bytes); + break; + case PCI_BASE_ADDRESS_5: + case PCI_BASE_ADDRESS_2: + err = 0; + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +/** + * ice_vdcm_cfg_write - write PCI configuration space + * @ivdm: pointer to VDCM + * @pos: write offset + * @buf: buf stores write content + * @count: write length + * + * Return 0 for success, negative value for failure. + */ +static int +ice_vdcm_cfg_write(struct ice_vdcm *ivdm, unsigned int pos, + char *buf, unsigned int count) +{ + int err; + + if (pos + count > ICE_VDCM_CFG_SIZE) + return -EINVAL; + + switch (pos) { + case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5: + if (!IS_ALIGNED(pos, 4)) + return -EINVAL; + err = ice_vdcm_cfg_write_bar(ivdm, pos, buf, count); + break; + default: + err = ice_vdcm_cfg_write_mask(ivdm, pos, (u8 *)buf, count); + break; + } + + return err; +} + +/** + * ice_vdcm_bar0_read - read PCI BAR0 region + * @ivdm: pointer to VDCM + * @pos: read offset + * @buf: buf stores read content + * @count: read length + * + * Return 0 for success, negative value for failure. + */ +static int +ice_vdcm_bar0_read(struct ice_vdcm *ivdm, unsigned int pos, + char *buf, unsigned int count) +{ + u32 val; + + if (pos + count > ICE_VDCM_BAR0_SIZE) + return -EINVAL; + + val = ivdm->adi->read_reg32(ivdm->adi, pos); + memcpy(buf, &val, count); + + return 0; +} + +/** + * ice_vdcm_bar0_write - write PCI BAR0 region + * @ivdm: pointer to VDCM + * @pos: write offset + * @buf: buf stores write content + * @count: write length + * + * Return 0 for success, negative value for failure. + */ +static int +ice_vdcm_bar0_write(struct ice_vdcm *ivdm, unsigned int pos, + char *buf, unsigned int count) +{ + u32 val; + + if ((pos + count > ICE_VDCM_BAR0_SIZE) || !IS_ALIGNED(pos, 4)) + return -EINVAL; + + val = *(u32 *)(buf); + ivdm->adi->write_reg32(ivdm->adi, pos, val); + + return 0; +} + +/** + * ice_vdcm_rw - read/write function entry + * @mdev: emulated device instance pointer + * @buf: buf stores read/write content + * @count: read/write length + * @ppos: read/write offset + * @is_write: is write operatoin + * + * Return the number of read/write bytes for success, other value for failure. 
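Two details of the config-space emulation above are worth spelling out. The high byte of PCI_STATUS is RW1C: a guest write of 1 clears the corresponding status bit and a write of 0 leaves it untouched, which is what the (~newval & oldval) expression in ice_vdcm_cfg_write_mask() implements. BAR sizing follows the usual PCI probe: because ice_vdcm_cfg_write_bar() masks off the size bits before the value is stored, an all-ones write reads back with the low address bits forced to zero. A guest-style sketch against the 16 KiB BAR3, written as if from inside this file (illustrative only, assuming an initialized ivdm):

static u32 example_bar3_size_probe(struct ice_vdcm *ivdm)
{
	u32 probe = 0xffffffff, readback;

	ice_vdcm_cfg_write(ivdm, PCI_BASE_ADDRESS_3, (char *)&probe, 4);
	ice_vdcm_cfg_read(ivdm, PCI_BASE_ADDRESS_3, (char *)&readback, 4);

	/* readback is 0xffffc00c: size bits cleared, attribute bits kept.
	 * Standard decode: drop the low attribute bits, invert, add one.
	 */
	return ~(readback & ~0xfU) + 1;		/* 0x4000 == ICE_VDCM_BAR3_SIZE */
}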
+ */ +static ssize_t +ice_vdcm_rw(struct mdev_device *mdev, char *buf, + size_t count, const loff_t *ppos, bool is_write) +{ + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + struct ice_vdcm *ivdm = mdev_get_drvdata(mdev); + u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; + int err = -EINVAL; + + switch (index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + if (is_write) + err = ice_vdcm_cfg_write(ivdm, pos, buf, count); + else + err = ice_vdcm_cfg_read(ivdm, pos, buf, count); + break; + case VFIO_PCI_BAR0_REGION_INDEX: + if (is_write) + err = ice_vdcm_bar0_write(ivdm, pos, buf, count); + else + err = ice_vdcm_bar0_read(ivdm, pos, buf, count); + break; + default: + break; + } + + return err ? err : count; +} + +/** + * ice_vdcm_read - read function entry + * @mdev: emulated device instance pointer + * @buf: buf stores read content + * @count: read length + * @ppos: read offset + * + * This function is called when VFIO consumer (like QEMU) wants to read + * emulated device with any device specific information like register access + * Return the number of read bytes. + */ +static ssize_t +ice_vdcm_read(struct mdev_device *mdev, char __user *buf, size_t count, + loff_t *ppos) +{ + unsigned int done = 0; + int err; + + while (count) { + size_t filled; + + if (count >= 4 && IS_ALIGNED(*ppos, 4)) { + u32 val; + + err = ice_vdcm_rw(mdev, (char *)&val, sizeof(val), + ppos, false); + if (err <= 0) + return -EFAULT; + + if (copy_to_user(buf, &val, sizeof(val))) + return -EFAULT; + + filled = 4; + } else if (count >= 2 && IS_ALIGNED(*ppos, 2)) { + u16 val; + + err = ice_vdcm_rw(mdev, (char *)&val, sizeof(val), + ppos, false); + if (err <= 0) + return -EFAULT; + + if (copy_to_user(buf, &val, sizeof(val))) + return -EFAULT; + + filled = 2; + } else { + u8 val; + + err = ice_vdcm_rw(mdev, (char *)&val, sizeof(val), + ppos, false); + if (err <= 0) + return -EFAULT; + + if (copy_to_user(buf, &val, sizeof(val))) + return -EFAULT; + + filled = 1; + } + + count -= filled; + done += filled; + *ppos += filled; + buf += filled; + } + + return done; +} + +/** + * ice_vdcm_write - write function entry + * @mdev: emulated device instance pointer + * @buf: buf stores content to be written + * @count: write length + * @ppos: write offset + * + * This function is called when VFIO consumer (like QEMU) wants to write + * emulated device with any device specific information like register access + * Return the number of written bytes. 
+ */ +static ssize_t +ice_vdcm_write(struct mdev_device *mdev, const char __user *buf, size_t count, + loff_t *ppos) +{ + unsigned int done = 0; + int err; + + while (count) { + size_t filled; + + if (count >= 4 && IS_ALIGNED(*ppos, 4)) { + u32 val; + + if (copy_from_user(&val, buf, sizeof(val))) + return -EFAULT; + + err = ice_vdcm_rw(mdev, (char *)&val, sizeof(val), + ppos, true); + if (err <= 0) + return -EFAULT; + + filled = 4; + } else if (count >= 2 && IS_ALIGNED(*ppos, 2)) { + u16 val; + + if (copy_from_user(&val, buf, sizeof(val))) + return -EFAULT; + + err = ice_vdcm_rw(mdev, (char *)&val, sizeof(val), + ppos, true); + if (err <= 0) + return -EFAULT; + + filled = 2; + } else { + u8 val; + + if (copy_from_user(&val, buf, sizeof(val))) + return -EFAULT; + + err = ice_vdcm_rw(mdev, (char *)&val, sizeof(val), + ppos, true); + if (err <= 0) + return -EFAULT; + + filled = 1; + } + + count -= filled; + done += filled; + *ppos += filled; + buf += filled; + } + + return done; +} + +/** + * ice_vdcm_vfio_device_get_info - get VFIO device info + * @ivdm: pointer to VDCM + * @arg: IOCTL command arguments + * + * Return 0 for success, negative for failure. + */ +static long +ice_vdcm_vfio_device_get_info(struct ice_vdcm *ivdm, unsigned long arg) +{ + struct vfio_device_info info; + unsigned long minsz; + + minsz = offsetofend(struct vfio_device_info, num_irqs); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + info.flags = VFIO_DEVICE_FLAGS_PCI | VFIO_DEVICE_FLAGS_RESET; + info.num_regions = VFIO_PCI_NUM_REGIONS; + info.num_irqs = VFIO_PCI_NUM_IRQS; + + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; +} + +/** + * ice_vdcm_sparse_mmap_cap - prepare sparse memory for memory map + * @caps: pointer to vfio region info capabilities + * @adi: pointer to assignable device interface + * + * Return 0 if success, negative for failure. + */ +static int ice_vdcm_sparse_mmap_cap(struct vfio_info_cap *caps, + struct ice_adi *adi) +{ + struct vfio_region_info_cap_sparse_mmap *sparse; + int nr_areas = 0; + int ret = 0; + size_t size; + int i = 0; + + if (!caps) + return -EINVAL; + + nr_areas = adi->get_sparse_mmap_num(adi); + + size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas)); + + sparse = kzalloc(size, GFP_KERNEL); + if (!sparse) + return -ENOMEM; + + sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP; + sparse->header.version = 1; + sparse->nr_areas = nr_areas; + + for (i = 0; i < nr_areas; i++) { + ret = adi->get_sparse_mmap_area(adi, i, + &sparse->areas[i].offset, + &sparse->areas[i].size); + if (ret < 0) { + kfree(sparse); + return ret; + } + } + + ret = vfio_info_add_capability(caps, &sparse->header, size); + kfree(sparse); + + return ret; +} + +/** + * ice_vdcm_mmap_open - open callback for VMA + * @vma: pointer to VMA + * + * Zap mmaps on open so that we can fault them in on access and therefore + * our vma_list only tracks mappings accessed since last zap. + * + * For the VMA created by QEMU/DPDK calling mmap() with vfio device fd, it is + * not called. If necessary, driver should explicitly call this function in the + * mmap() callback to do initialization. + * + * This callback is typically called after calling mmap() and later forking a + * child process without VM_DONTCOPY vm_flags for multi-process situation. + * + * For QEMU/KVM, QEMU will set MADV_DONTFORK by madvise() when adding ram block, + * this will mark this VMA with VM_DONTCOPY. 
So forking a child process in QEMU + * will not trigger this callback. Refer to ram_add_block() for more details. + * + * For DPDK, MADV_DONTFORK is not set by default, so forking a child process + * will trigger this callback. + */ +static void ice_vdcm_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +/** + * ice_vdcm_mmap_close - close callback for VMA + * @vma: pointer to VMA + * + * This function is typically called when the process is exiting and this VMA + * has close callback registered. + */ +static void ice_vdcm_mmap_close(struct vm_area_struct *vma) +{ + struct ice_vdcm *ivdm = vma->vm_private_data; + struct ice_vdcm_mmap_vma *mmap_vma; + + mutex_lock(&ivdm->vma_lock); + list_for_each_entry(mmap_vma, &ivdm->vma_list, vma_next) { + if (mmap_vma->vma == vma) { + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + break; + } + } + mutex_unlock(&ivdm->vma_lock); +} + +/** + * ice_vdcm_mmap_fault - close callback for VMA + * @vmf: pointer to vm fault context + */ +static vm_fault_t ice_vdcm_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct ice_vdcm_mmap_vma *mmap_vma; + struct ice_vdcm *ivdm; + unsigned int index; + u64 addr, pg_off; + int err; + + ivdm = vma->vm_private_data; + mutex_lock(&ivdm->vma_lock); + + mmap_vma = kzalloc(sizeof(*mmap_vma), GFP_KERNEL); + if (!mmap_vma) { + mutex_unlock(&ivdm->vma_lock); + return VM_FAULT_OOM; + } + + mmap_vma->vma = vma; + list_add(&mmap_vma->vma_next, &ivdm->vma_list); + + mutex_unlock(&ivdm->vma_lock); + + index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + pg_off = vma->vm_pgoff & + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + err = ivdm->adi->get_sparse_mmap_hpa(ivdm->adi, index, pg_off, &addr); + if (err < 0) { + dev_err(ivdm->dev, + "failed to get HPA for memory map, err: %d.\n", err); + return VM_FAULT_SIGBUS; + } + + dev_dbg(ivdm->dev, "fault address GPA:0x%lx HPA:0x%llx HVA:0x%lx", + vma->vm_pgoff << PAGE_SHIFT, addr, vma->vm_start); + + if (io_remap_pfn_range(vma, vma->vm_start, PHYS_PFN(addr), + vma->vm_end - vma->vm_start, vma->vm_page_prot)) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +static const struct vm_operations_struct ice_vdcm_mmap_ops = { + .open = ice_vdcm_mmap_open, + .close = ice_vdcm_mmap_close, + .fault = ice_vdcm_mmap_fault, +}; + +/** + * ice_vdcm_mmap - map device memory to user space + * @mdev: pointer to the mdev device + * @vma: pointer to the vm where device memory will be mapped + * + * Return 0 if succeed, negative for failure. + */ +static int ice_vdcm_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) +{ + struct ice_vdcm *ivdm = mdev_get_drvdata(mdev); + unsigned int index; + + index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + + if (index >= VFIO_PCI_NUM_REGIONS || + vma->vm_end < vma->vm_start || + (vma->vm_flags & VM_SHARED) == 0) + return -EINVAL; + + vma->vm_private_data = ivdm; + /* Set this page's cache policy as UC(Uncachable) memory type in x86 */ + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + /* See remap_pfn_range(), called from vfio_pci_fault() but we can't + * change vm_flags within the fault handler. Set them now. 
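The same offset convention reappears page-granular in ice_vdcm_mmap_fault() above: with 4 KiB pages, an mmap() of BAR0 at file offset 1ULL << 40 yields vm_pgoff = 1 << 28, shifting right by VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT (40 - 12 = 28) recovers region index 1, and the remaining low pgoff bits select the page that get_sparse_mmap_hpa() translates to a host physical address.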
+ */ + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = &ice_vdcm_mmap_ops; + + return 0; +} + +/** + * ice_vdcm_vfio_device_get_region_info - get VFIO device region info + * @ivdm: pointer to VDCM + * @arg: IOCTL command arguments + * + * Return 0 for success, negative for failure. + */ +static long +ice_vdcm_vfio_device_get_region_info(struct ice_vdcm *ivdm, unsigned long arg) +{ + struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + struct vfio_region_info info; + unsigned long minsz; + int ret = 0; + + minsz = offsetofend(struct vfio_region_info, offset); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + switch (info.index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = ICE_VDCM_CFG_SIZE; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + break; + case VFIO_PCI_BAR0_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = ICE_VDCM_BAR0_SIZE; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE | + VFIO_REGION_INFO_FLAG_MMAP; + ret = ice_vdcm_sparse_mmap_cap(&caps, ivdm->adi); + if (ret) + return ret; + if (caps.size) { + info.flags |= VFIO_REGION_INFO_FLAG_CAPS; + if (info.argsz < sizeof(info) + caps.size) { + info.argsz = sizeof(info) + caps.size; + info.cap_offset = 0; + } else { + vfio_info_cap_shift(&caps, sizeof(info)); + if (copy_to_user((void __user *)(arg + + sizeof(info)), caps.buf, + caps.size)) { + kfree(caps.buf); + return -EFAULT; + } + info.cap_offset = sizeof(info); + } + + kfree(caps.buf); + } + break; + case VFIO_PCI_BAR3_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = ICE_VDCM_BAR3_SIZE; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + break; + case VFIO_PCI_BAR1_REGION_INDEX: + case VFIO_PCI_BAR2_REGION_INDEX: + case VFIO_PCI_BAR4_REGION_INDEX: + case VFIO_PCI_BAR5_REGION_INDEX: + case VFIO_PCI_VGA_REGION_INDEX: + case VFIO_PCI_ROM_REGION_INDEX: + info.offset = 0; + info.size = 0; + info.flags = 0; + break; + default: + return -EINVAL; + } + + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; +} + +/** + * ice_vdcm_vfio_device_get_irq_info - get VFIO device IRQ info + * @ivdm: pointer to VDCM + * @arg: IOCTL command arguments + * + * Return 0 for success, negative for failure. + */ +static long +ice_vdcm_vfio_device_get_irq_info(struct ice_vdcm *ivdm, unsigned long arg) +{ + struct vfio_irq_info info; + unsigned long minsz; + int irq_count; + + minsz = offsetofend(struct vfio_irq_info, count); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS) + return -EINVAL; + + /* Only MSI-X interrupts are supported */ + if (info.index != VFIO_PCI_MSIX_IRQ_INDEX) + return -EINVAL; + + irq_count = ivdm->adi->get_vector_num(ivdm->adi); + if (irq_count <= 0) + return -EINVAL; + + info.flags = VFIO_IRQ_INFO_EVENTFD | VFIO_IRQ_INFO_NORESIZE; + info.count = irq_count; + + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; +} + +/** + * ice_vdcm_msix_handler - VDCM MSIX interrupt handler + * @irq: OS IRQ number + * @arg: IRQ data + * + * Return 0 or positive for success, negative for failure. 
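The GET_REGION_INFO and GET_IRQ_INFO handlers above are what a userspace VFIO consumer drives before touching the device. A minimal illustrative client fragment, not part of the driver, assuming device_fd is an already-open VFIO device fd for the mdev and eliding error details:

#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdint.h>

static int example_read_vendor_id(int device_fd)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_PCI_CONFIG_REGION_INDEX,
	};
	uint32_t ids;

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info) < 0)
		return -1;

	/* pread() at info.offset lands in ice_vdcm_read() and then
	 * ice_vdcm_cfg_read(); the low 16 bits are the emulated vendor
	 * ID (0x8086) from ice_vdcm_pci_config[].
	 */
	if (pread(device_fd, &ids, sizeof(ids), info.offset) != sizeof(ids))
		return -1;

	return ids & 0xffff;
}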
+ */ +static irqreturn_t ice_vdcm_msix_handler(int irq, void *arg) +{ + struct ice_vdcm_irq_ctx *ctx = (struct ice_vdcm_irq_ctx *)arg; + + eventfd_signal(ctx->trigger, 1); + return IRQ_HANDLED; +} + +/** + * ice_vdcm_set_vector_signal - set single signal notification for vector + * @ivdm: pointer to VDCM + * @vector: vector number + * @fd: eventfd descriptor + * + * This function is used to register a signal notification trigger associated + * with this vector number when fd is 0 or positive. If fd is negative, the + * signal notification trigger associated with this vector number will be + * unregistered. + * + * Return 0 for success, negative for failure. + */ +static int +ice_vdcm_set_vector_signal(struct ice_vdcm *ivdm, int vector, int fd) +{ + struct eventfd_ctx *trigger; + int irq, err; + char *name; + + irq = ivdm->adi->get_vector_irq(ivdm->adi, vector); + if (irq < 0) + return irq; + + if (ivdm->ctx[vector].trigger) { +#if IS_ENABLED(CONFIG_IRQ_BYPASS_MANAGER) + irq_bypass_unregister_producer(&ivdm->ctx[vector].producer); +#endif /* CONFIG_IRQ_BYPASS_MANAGER */ + free_irq(irq, &ivdm->ctx[vector]); + kfree(ivdm->ctx[vector].name); + eventfd_ctx_put(ivdm->ctx[vector].trigger); + ivdm->ctx[vector].trigger = NULL; + } + + if (fd < 0) + return 0; + + name = kasprintf(GFP_KERNEL, "ice_vdcm-msix[%d](%s)", + vector, dev_name(ivdm->dev)); + if (!name) + return -ENOMEM; + + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) { + kfree(name); + return PTR_ERR(trigger); + } + + ivdm->ctx[vector].name = name; + ivdm->ctx[vector].trigger = trigger; + + err = request_irq(irq, ice_vdcm_msix_handler, 0, name, + &ivdm->ctx[vector]); + if (err < 0) + goto irq_err; + + ivdm->ctx[vector].irq = irq; + +#if IS_ENABLED(CONFIG_IRQ_BYPASS_MANAGER) + ivdm->ctx[vector].producer.token = trigger; + ivdm->ctx[vector].producer.irq = irq; + + err = irq_bypass_register_producer(&ivdm->ctx[vector].producer); + + if (err) { + dev_info(ivdm->dev, + "irq bypass producer (token %p) registration fails: %d\n ", + ivdm->ctx[vector].producer.token, err); + + ivdm->ctx[vector].producer.token = NULL; + } +#endif /* CONFIG_IRQ_BYPASS_MANAGER */ + + return 0; + +irq_err: + kfree(name); + eventfd_ctx_put(trigger); + ivdm->ctx[vector].trigger = NULL; + return err; +} + +/** + * ice_vdcm_set_vector_signals - set signal notification for vector set + * @ivdm: pointer to VDCM + * @start: vector start + * @count: vector number + * @fds: array to store eventfd descriptor + * + * Return 0 for success, negative for failure. + */ +static int +ice_vdcm_set_vector_signals(struct ice_vdcm *ivdm, u32 start, + u32 count, int *fds) +{ + int i, j, err = 0; + + if (start >= ivdm->num_ctx || start + count > ivdm->num_ctx) + return -EINVAL; + + for (i = 0, j = start; i < (int)count; i++, j++) { + int fd = fds ? fds[i] : -1; + + err = ice_vdcm_set_vector_signal(ivdm, j, fd); + if (err) + break; + } + + if (err) { + for (; j >= (int)start; j--) + ice_vdcm_set_vector_signal(ivdm, j, -1); + } + + return err; +} + +/** + * ice_vdcm_msix_enable - enable MSIX interrupt + * @ivdm: pointer to VDCM + * @nvec: vector numbers + * + * Return 0 for success, negative for failure. 
+ */ +static int ice_vdcm_msix_enable(struct ice_vdcm *ivdm, int nvec) +{ + if (nvec < 1) + return -EINVAL; + + ivdm->ctx = kcalloc(nvec, sizeof(ivdm->ctx[0]), GFP_KERNEL); + if (!ivdm->ctx) + return -ENOMEM; + + ivdm->irq_type = VFIO_PCI_MSIX_IRQ_INDEX; + ivdm->num_ctx = nvec; + + return 0; +} + +/** + * ice_vdcm_msix_disable - disable MSIX interrupt + * @ivdm: pointer to VDCM + */ +static void ice_vdcm_msix_disable(struct ice_vdcm *ivdm) +{ + ice_vdcm_set_vector_signals(ivdm, 0, ivdm->num_ctx, NULL); + + ivdm->irq_type = VFIO_PCI_NUM_IRQS; + ivdm->num_ctx = 0; + kfree(ivdm->ctx); + ivdm->ctx = NULL; +} + +/** + * ice_vdcm_set_msix_trigger - set MSIX trigger + * @ivdm: pointer to VDCM + * @hdr: vfio_irq_set header + * @data: vfio_irq_set appended data + * + * Return 0 for success, negative for failure. + */ +static long +ice_vdcm_set_msix_trigger(struct ice_vdcm *ivdm, struct vfio_irq_set *hdr, + void *data) +{ + /* Checking ivdm->irq_type == hdr->index is used to skip the + * unnecessary ice_vdcm_msix_disable() calling. + * For example, when hypervisor starts, it will release all the + * IRQ context by sending VFIO_DEVICE_SET_IRQS UAPI. If the IRQ + * context is not setup before, ivdm->irq_type is VFIO_PCI_NUM_IRQS + * by default and nothing should be done here. + */ + if (ivdm->irq_type == hdr->index && + !hdr->count && (hdr->flags & VFIO_IRQ_SET_DATA_NONE)) { + ice_vdcm_msix_disable(ivdm); + return 0; + } + + if (hdr->flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int *fds = (int *)data; + int err; + + if (ivdm->irq_type == hdr->index) + return ice_vdcm_set_vector_signals(ivdm, hdr->start, + hdr->count, fds); + + err = ice_vdcm_msix_enable(ivdm, hdr->start + hdr->count); + if (err) + return err; + + err = ice_vdcm_set_vector_signals(ivdm, hdr->start, hdr->count, + fds); + if (err) + ice_vdcm_msix_disable(ivdm); + + return err; + } + + return 0; +} + +/** + * ice_vdcm_pre_rebuild_irqctx - Free IRQ before rebuild IRQ context + * @token: pinter to VDCM + * + * This function is called by ADI resource manager to free current IRQ. + * It should be called with ice_vdcm_rebuild_irqctx in pair. + * When AVF reset happens, VSI is rebuilt and previously setup IRQ + * which is associated with this VSI should be freed. + * + * Return 0 for success, negative for failure. + */ +void ice_vdcm_pre_rebuild_irqctx(void *token) +{ + struct ice_vdcm *ivdm = (struct ice_vdcm *)token; + int vector; + + if (WARN_ON(!ivdm)) + return; + + if (ivdm->irq_type >= VFIO_PCI_NUM_IRQS) + return; + + for (vector = 0; vector < (int)ivdm->num_ctx; vector++) { + if (!ivdm->ctx[vector].trigger) + continue; + if (WARN_ON_ONCE(!ivdm->ctx[vector].irq)) + return; + +#if IS_ENABLED(CONFIG_IRQ_BYPASS_MANAGER) + irq_bypass_unregister_producer(&ivdm->ctx[vector].producer); +#endif /* CONFIG_IRQ_BYPASS_MANAGER */ + free_irq(ivdm->ctx[vector].irq, &ivdm->ctx[vector]); + } +} + +/** + * ice_vdcm_rebuild_irqctx - rebuild VDCM IRQ context + * @token: pinter to VDCM + * + * This function is called by ADI resource manager to request the IRQ + * for new adi and associate with previous IRQ context. + * When AVF reset happens, VSI is rebuilt and previously setup IRQ context + * will be associated with new adi IRQ. + * + * Return 0 for success, negative for failure. 
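ice_vdcm_set_msix_trigger() above is the receiving end of the standard VFIO_DEVICE_SET_IRQS flow. A minimal illustrative userspace counterpart that routes MSI-X vector 0 to an eventfd follows (again not part of the driver; device_fd is an assumed open VFIO device fd). Once armed, every ice_vdcm_msix_handler() invocation signals the eventfd and the consumer observes it with read():

#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <string.h>
#include <unistd.h>

static int example_arm_msix_vector0(int device_fd)
{
	char buf[sizeof(struct vfio_irq_set) + sizeof(int)];
	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;

	set->argsz = sizeof(buf);
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(efd));	/* one eventfd for vector 0 */

	return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
}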
+ */ +int ice_vdcm_rebuild_irqctx(void *token) +{ + struct ice_vdcm *ivdm = (struct ice_vdcm *)token; + int irq, vector; + int err; + + if (WARN_ON(!ivdm)) + return -EINVAL; + if (ivdm->irq_type >= VFIO_PCI_NUM_IRQS) + return 0; + + for (vector = 0 ; vector < (int)ivdm->num_ctx; vector++) { + if (!ivdm->ctx[vector].trigger) + continue; + + irq = ivdm->adi->get_vector_irq(ivdm->adi, vector); + if (irq < 0) + return irq; + + err = request_irq(irq, ice_vdcm_msix_handler, 0, + ivdm->ctx[vector].name, &ivdm->ctx[vector]); + if (err < 0) { + kfree(ivdm->ctx[vector].name); + ivdm->ctx[vector].trigger = NULL; + return err; + } + + ivdm->ctx[vector].irq = irq; +#if IS_ENABLED(CONFIG_IRQ_BYPASS_MANAGER) + ivdm->ctx[vector].producer.token = ivdm->ctx[vector].trigger; + ivdm->ctx[vector].producer.irq = irq; + + err = irq_bypass_register_producer(&ivdm->ctx[vector].producer); + + if (err) { + dev_info(ivdm->dev, + "irq bypass producer (token %p) registration fails: %d\n ", + ivdm->ctx[vector].producer.token, err); + + ivdm->ctx[vector].producer.token = NULL; + } +#endif /* CONFIG_IRQ_BYPASS_MANAGER */ + } + + return 0; +} + +/** + * ice_vdcm_vfio_device_set_irqs - set VFIO device IRQ + * @ivdm: pointer to VDCM + * @arg: IOCTL command arguments + * + * Return 0 for success, negative for failure. + */ +static long +ice_vdcm_vfio_device_set_irqs(struct ice_vdcm *ivdm, unsigned long arg) +{ + struct vfio_irq_set hdr; + size_t data_size = 0; + unsigned long minsz; + u8 *data = NULL; + int total; + int err; + + minsz = offsetofend(struct vfio_irq_set, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + if (hdr.argsz < minsz) + return -EINVAL; + + total = ivdm->adi->get_vector_num(ivdm->adi); + if (total <= 0) + return -EINVAL; + + err = vfio_set_irqs_validate_and_prepare(&hdr, + total, + VFIO_PCI_NUM_IRQS, + &data_size); + if (err) + return -EINVAL; + + if (data_size) { + data = (u8 *)memdup_user((void __user *)(arg + minsz), + data_size); + if (IS_ERR(data)) + return PTR_ERR(data); + } + + switch (hdr.index) { + case VFIO_PCI_MSIX_IRQ_INDEX: + switch (hdr.flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_TRIGGER: + err = ice_vdcm_set_msix_trigger(ivdm, &hdr, data); + break; + default: + err = -ENOTTY; + break; + } + break; + default: + err = -ENOTTY; + break; + } + + kfree(data); + + return err; +} + +/** + * ice_vdcm_zap - remove all the previously setup VMA mmap + * @token: pointer to VDCM + * + * Return 0 for success, negative for failure. + */ +int ice_vdcm_zap(void *token) +{ + struct ice_vdcm *ivdm = (struct ice_vdcm *)token; + struct ice_vdcm_mmap_vma *mmap_vma, *tmp; + + if (!ivdm) + return -EINVAL; + + /* There are two loops inside while(1) loop in order to gurantee the + * locking order: locking mm first then vma_lock. Because when page + * fault happens, kernel will lock the mm first and then call the + * page fault handler registered, in the ice_vdcm_mmap_fault callback, + * vma_lock is acquired to protect the vma_list. So locking the vma_lock + * after the mm must be followed in the driver to prevent deadlock. + * + * The first loop is to fetch the first valid mm_struct in preparation + * for the next loop mmap_read_lock usage, which must be called before + * vma_lock is acquired. Since VDCM may record VMAs from multi process, + * this behavior will delete the VMAs belonging to the same process one + * by one. 
+ */ + while (1) { + struct mm_struct *mm = NULL; + + mutex_lock(&ivdm->vma_lock); + while (!list_empty(&ivdm->vma_list)) { + mmap_vma = list_first_entry(&ivdm->vma_list, + struct ice_vdcm_mmap_vma, + vma_next); + /* Fetch the first task memory context*/ + mm = mmap_vma->vma->vm_mm; + if (mmget_not_zero(mm)) + break; + + /* If there are no lightweight processes sharing the + * mm_struct data structure, delete the list node. + */ + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + mm = NULL; + } + /* Return when vma_list is empty */ + if (!mm) { + mutex_unlock(&ivdm->vma_lock); + return 0; + } + mutex_unlock(&ivdm->vma_lock); + + mmap_read_lock(mm); + mutex_lock(&ivdm->vma_lock); + list_for_each_entry_safe(mmap_vma, tmp, + &ivdm->vma_list, vma_next) { + struct vm_area_struct *vma = mmap_vma->vma; + + /* Skip all the VMAs which don't belong to this task + * memory context. We'll zap the VMAs sharing the same + * mm_struct which means they belong the same process. + */ + if (vma->vm_mm != mm) + continue; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + + zap_vma_ptes(vma, vma->vm_start, + vma->vm_end - vma->vm_start); + dev_dbg(ivdm->dev, "zap start HVA:0x%lx GPA:0x%lx size:0x%lx", + vma->vm_start, vma->vm_pgoff << PAGE_SHIFT, + vma->vm_end - vma->vm_start); + } + mutex_unlock(&ivdm->vma_lock); + mmap_read_unlock(mm); + mmput(mm); + } +} + +/** + * ice_vdcm_vfio_device_reset - VFIO device reset + * @ivdm: pointer to VDCM + * + * Return 0 for success, negative for failure. + */ +static long ice_vdcm_vfio_device_reset(struct ice_vdcm *ivdm) +{ + ice_vdcm_zap(ivdm); + return ivdm->adi->reset(ivdm->adi); +} + +/** + * ice_vdcm_ioctl - IOCTL function entry + * @mdev: emulated device instance pointer + * @cmd: pre defined ioctls + * @arg: cmd arguments + * + * This function is called when VFIO consumer (like QEMU) wants to config + * emulated device. + * Return 0 for success, negative for failure. + */ +static long +ice_vdcm_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned long arg) +{ + struct ice_vdcm *ivdm = mdev_get_drvdata(mdev); + + switch (cmd) { + case VFIO_DEVICE_GET_INFO: + return ice_vdcm_vfio_device_get_info(ivdm, arg); + case VFIO_DEVICE_GET_REGION_INFO: + return ice_vdcm_vfio_device_get_region_info(ivdm, arg); + case VFIO_DEVICE_GET_IRQ_INFO: + return ice_vdcm_vfio_device_get_irq_info(ivdm, arg); + case VFIO_DEVICE_SET_IRQS: + return ice_vdcm_vfio_device_set_irqs(ivdm, arg); + case VFIO_DEVICE_RESET: + return ice_vdcm_vfio_device_reset(ivdm); + default: + break; + } + + return -ENOTTY; +} + +/** + * ice_vdcm_get_pasid - Get PASID value + * @ivdm: pointer to VDCM + * + * Return valid PASID value on success, negative for failure. + */ +static int ice_vdcm_get_pasid(struct ice_vdcm *ivdm) +{ + struct device *dev = ivdm->dev; + struct vfio_group *vfio_group; + struct iommu_domain *domain; + int pasid; + + /** + * vfio_group_get_external_user_from_dev() will increase kobj ref + * counter, so vfio_group should be cached to be passed to + * vfio_group_put_external_user() to decrease kobj ref counter. 
+ */ + vfio_group = vfio_group_get_external_user_from_dev(dev); + if (IS_ERR_OR_NULL(vfio_group)) + return -EFAULT; + + domain = vfio_group_iommu_domain(vfio_group); + if (IS_ERR_OR_NULL(domain)) { + vfio_group_put_external_user(vfio_group); + return -EFAULT; + } + + pasid = iommu_aux_get_pasid(domain, ivdm->parent_dev); + if (pasid < 0) { + vfio_group_put_external_user(vfio_group); + return -EFAULT; + } + + ivdm->vfio_group = vfio_group; + + return pasid; +} + +/** + * ice_vdcm_open_device - open emulated device + * @mdev: emulated device instance pointer + * + * This function is called when VFIO consumer (like QEMU) wants to open + * emulated device. + * Return 0 for success, negative for failure. + */ +static int ice_vdcm_open_device(struct mdev_device *mdev) +{ + struct ice_vdcm *ivdm = mdev_get_drvdata(mdev); + int pasid; + int ret; + + pasid = ice_vdcm_get_pasid(ivdm); + if (pasid < 0) + return pasid; + + ret = ivdm->adi->cfg_pasid(ivdm->adi, pasid, true); + if (ret) { + vfio_group_put_external_user(ivdm->vfio_group); + return ret; + } + + return 0; +} + +/** + * ice_vdcm_close_device - close a mediated device + * @mdev: emulated device instance pointer + * + * This function is called when VFIO consumer (like QEMU) wants to close + * emulated device. + */ +static void ice_vdcm_close_device(struct mdev_device *mdev) +{ + struct ice_vdcm *ivdm = mdev_get_drvdata(mdev); + + if (ivdm->irq_type < VFIO_PCI_NUM_IRQS) + ice_vdcm_msix_disable(ivdm); + + ivdm->adi->cfg_pasid(ivdm->adi, 0, false); + ivdm->adi->close(ivdm->adi); + + vfio_group_put_external_user(ivdm->vfio_group); +} + +/** + * name_show - SYSFS show function + * @kobj: kernel object pointer + * @dev: linux device pointer + * @buf: return buffer + * + * This function is called when SYSFS file entry is read by user + * Return number of read bytes. + */ +static ssize_t +#ifdef HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE +name_show(struct kobject *kobj, struct device *dev, char *buf) +#else +name_show(struct mdev_type *mtype, + struct mdev_type_attribute *attr, char *buf) +#endif +{ +#ifndef HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE + struct device *dev = mtype_get_parent_dev(mtype); +#endif /* !HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE */ + return sprintf(buf, "%s\n", dev_name(dev)); +} +static MDEV_TYPE_ATTR_RO(name); + +/** + * available_instances_show - SYSFS show function + * @kobj: kernel object pointer + * @dev: device pointer + * @buf: return buffer + * + * This function is called when SYSFS file entry is read by user + * Return number of read bytes. + */ +static ssize_t +#ifdef HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE +available_instances_show(struct kobject *kobj, struct device *dev, char *buf) +#else +available_instances_show(struct mdev_type *mtype, + struct mdev_type_attribute *attr, char *buf) +#endif +{ + return sprintf(buf, "ivdcm\n"); +} +static MDEV_TYPE_ATTR_RO(available_instances); + +/** + * device_api_show - SYSFS show function + * @kobj: kernel object pointer + * @dev: device pointer + * @buf: return buffer + * + * This function is called when SYSFS file entry is read by user + * Return number of read bytes. 
+ */ +static ssize_t +#ifdef HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE +device_api_show(struct kobject *kobj, struct device *dev, char *buf) +#else +device_api_show(struct mdev_type *mtype, + struct mdev_type_attribute *attr, char *buf) +#endif +{ + return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING); +} +static MDEV_TYPE_ATTR_RO(device_api); + +static struct attribute *ice_vdcm_types_attrs[] = { + &mdev_type_attr_name.attr, + &mdev_type_attr_device_api.attr, + &mdev_type_attr_available_instances.attr, + NULL, +}; + +static struct attribute_group ice_vdcm_type_group0 = { + .name = "vdcm", + .attrs = ice_vdcm_types_attrs, +}; + +static struct attribute_group *ice_vdcm_type_groups[] = { + &ice_vdcm_type_group0, + NULL, +}; + +static const struct mdev_parent_ops ice_vdcm_parent_ops = { + .supported_type_groups = ice_vdcm_type_groups, + .create = ice_vdcm_create, + .remove = ice_vdcm_remove, +#ifdef HAVE_DEVICE_IN_MDEV_PARENT_OPS + .open_device = ice_vdcm_open_device, + .close_device = ice_vdcm_close_device, +#else + .open = ice_vdcm_open_device, + .release = ice_vdcm_close_device, +#endif + .read = ice_vdcm_read, + .write = ice_vdcm_write, + .ioctl = ice_vdcm_ioctl, + .mmap = ice_vdcm_mmap, +}; + +/* + * ice_vdcm_init - VDCM initialization routine + * @pdev: the parent pci device + * + * Return 0 for success, negative for failure. + */ +int ice_vdcm_init(struct pci_dev *pdev) +{ + int err; + + err = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_AUX); + if (err) { + dev_err(&pdev->dev, "Failed to enable aux-domain: %d", err); + return err; + } + + err = mdev_register_device(&pdev->dev, &ice_vdcm_parent_ops); + if (err) { + dev_err(&pdev->dev, "S-IOV device register failed, err %d", + err); + iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_AUX); + return err; + } + + return 0; +} + +/* + * ice_vdcm_deinit - VDCM deinitialization routine + * @pdev: the parent pci device + */ +void ice_vdcm_deinit(struct pci_dev *pdev) +{ + mdev_unregister_device(&pdev->dev); + iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_AUX); +} diff --git a/drivers/thirdparty/ice/ice_vdcm.h b/drivers/thirdparty/ice/ice_vdcm.h new file mode 100644 index 000000000000..96bafbc2453b --- /dev/null +++ b/drivers/thirdparty/ice/ice_vdcm.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _ICE_VDCM_H_ +#define _ICE_VDCM_H_ + +#include "ice.h" +#include +#if IS_ENABLED(CONFIG_VFIO_MDEV) +#include +#endif /* CONFIG_VFIO_MDEV */ +#include +#include +#if IS_ENABLED(CONFIG_IRQ_BYPASS_MANAGER) +#include +#endif /* CONFIG_IRQ_BYPASS_MANAGER */ + +#define ICE_VDCM_CFG_SIZE 256 +#define ICE_VDCM_BAR0_SIZE SZ_64M + +struct ice_vdcm_irq_ctx { + struct eventfd_ctx *trigger; + char *name; + unsigned int irq; +#if IS_ENABLED(CONFIG_IRQ_BYPASS_MANAGER) + struct irq_bypass_producer producer; +#endif /* CONFIG_IRQ_BYPASS_MANAGER */ +}; + +/** + * struct ice_vdcm - The abstraction for VDCM + * + * @dev: linux device for this VDCM + * @parent_dev: linux parent device for this VDCM + * @vfio_group: vfio group for this device + * @pci_cfg_space: PCI configuration space buffer + * @vma_lock: protects access to vma_list + * @vma_list: linked list for VMA + * @ctx: IRQ context + * @num_ctx: number of requested IRQ context + * @irq_type: IRQ type + * @adi: ADI attribute + */ +struct ice_vdcm { + /* Common attribute */ + struct device *dev; + struct device *parent_dev; + struct vfio_group *vfio_group; + + u8 pci_cfg_space[ICE_VDCM_CFG_SIZE]; + struct mutex vma_lock; /* protects access to vma_list */ + struct list_head vma_list; + + /* IRQ context */ + struct ice_vdcm_irq_ctx *ctx; + unsigned int num_ctx; + unsigned int irq_type; + + /* Device Specific */ + struct ice_adi *adi; +}; + +/** + * struct ice_adi - Assignable Device Interface attribute + * + * This structure defines the device specific resource and callbacks + * + * It is expected to be embedded in a private container structure allocated by + * the driver. Use container_of to get the private structure pointer back from + * a pointer to the ice_adi structure. + * + * @get_vector_num: get number of vectors assigned to this ADI + * @get_vector_irq: get OS IRQ number per vector + * @reset: This function is called when VDCM wants to reset ADI + * @cfg_pasid: This function is called when VDCM wants to configure ADI's PASID + * @close: This function is called when VDCM wants to close ADI + * @read_reg32: This function is called when VDCM wants to read ADI register + * @write_reg32: This function is called when VDCM wants to write ADI register + * @get_sparse_mmap_hpa: This function is called when VDCM wants to get ADI HPA + * @get_sparse_mmap_num: This function is called when VDCM wants to get + * the number of sparse memory areas + * @get_sparse_mmap_area: This function is called when VDCM wants to get + * layout of sparse memory + */ +struct ice_adi { + int (*get_vector_num)(struct ice_adi *adi); + int (*get_vector_irq)(struct ice_adi *adi, u32 vector); + int (*reset)(struct ice_adi *adi); + int (*cfg_pasid)(struct ice_adi *adi, u32 pasid, bool ena); + int (*close)(struct ice_adi *adi); + u32 (*read_reg32)(struct ice_adi *adi, size_t offs); + void (*write_reg32)(struct ice_adi *adi, size_t offs, u32 val); + int (*get_sparse_mmap_hpa)(struct ice_adi *adi, u32 index, u64 pg_off, + u64 *addr); + int (*get_sparse_mmap_num)(struct ice_adi *adi); + int (*get_sparse_mmap_area)(struct ice_adi *adi, int index, + u64 *offset, u64 *size); +}; + +#if IS_ENABLED(CONFIG_VFIO_MDEV) && defined(HAVE_PASID_SUPPORT) +struct ice_adi *ice_vdcm_alloc_adi(struct device *dev, void *token); +void ice_vdcm_free_adi(struct ice_adi *adi); +void ice_vdcm_pre_rebuild_irqctx(void *token); +int ice_vdcm_rebuild_irqctx(void *token); +int ice_vdcm_zap(void *token); +int ice_vdcm_init(struct pci_dev *pdev); +void ice_vdcm_deinit(struct pci_dev *pdev); +#else 
+static inline int ice_vdcm_init(struct pci_dev *pdev) +{ + return 0; +} + +static inline void ice_vdcm_deinit(struct pci_dev *pdev) { } +#endif /* CONFIG_VFIO_MDEV && HAVE_PASID_SUPPORT */ + +#endif /* _ICE_VDCM_H_ */ diff --git a/drivers/thirdparty/ice/ice_vf_adq.c b/drivers/thirdparty/ice/ice_vf_adq.c new file mode 100644 index 000000000000..b023787a5f01 --- /dev/null +++ b/drivers/thirdparty/ice/ice_vf_adq.c @@ -0,0 +1,1469 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" +#include "ice_vf_adq.h" +#include "ice_tc_lib.h" +#include "ice_vf_lib_private.h" + +struct ice_vsi *ice_get_vf_adq_vsi(struct ice_vf *vf, u8 tc) +{ + return vf->pf->vsi[vf->ch[tc].vsi_idx]; +} + +/** + * ice_is_vf_adq_ena - is VF ADQ enabled + * @vf: pointer to the VF info + * + * This function returns true if VF ADQ is enabled. It is must to check + * VF's num_tc as well, it must be more than ICE_VF_CHNL_START_TC for + * valid ADQ configuration + */ +bool ice_is_vf_adq_ena(struct ice_vf *vf) +{ + return vf->adq_enabled && (vf->num_tc > ICE_VF_CHNL_START_TC); +} + +/** + * ice_vf_adq_vsi_stop_rings - stops the VF ADQ VSI rings + * @vf: pointer to the VF info + * @tc: VF ADQ TC number + * + * This function stops Tx and Rx ring specific to VF ADQ VSI + */ +static void ice_vf_adq_vsi_stop_rings(struct ice_vf *vf, int tc) +{ + struct ice_vsi *vsi = ice_get_vf_adq_vsi(vf, tc); + + if (!vsi) + return; + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); + ice_vsi_stop_all_rx_rings(vsi); +} + +/** + * ice_vf_adq_vsi_disable_txqs - disable Tx queues for VF ADQ + * @vf: pointer to the VF info + * @tc: VF ADQ TC number + * + * This function disabled Tx queues specific to VF ADQ VSI + */ +static void ice_vf_adq_vsi_disable_txqs(struct ice_vf *vf, int tc) +{ + struct ice_vsi *vsi = ice_get_vf_adq_vsi(vf, tc); + + if (!vsi) + return; + ice_dis_vsi_txq(vsi->port_info, vf->ch[tc].vsi_idx, 0, 0, NULL, NULL, + NULL, vf->vf_ops->reset_type, vf->vf_id, NULL); +} + +/** + * ice_vf_adq_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access + * @vf: VF that ADQ VSI is being invalidated on + * @tc: TC used to access channel specific vsi_idx/vsi_num + */ +static void ice_vf_adq_invalidate_vsi(struct ice_vf *vf, u8 tc) +{ + vf->ch[tc].vsi_idx = ICE_NO_VSI; + vf->ch[tc].vsi_num = ICE_NO_VSI; +} + +/** + * ice_vf_adq_vsi_valid - is ADQ VSI valid? + * @vf: VF that ADQ VSI is being validated + * @tc: TC used to access channel specific vsi_idx/vsi_num + * + * vsi_idx must be non-zero, and vsi_idx and vsi_num must not be ICE_NO_VSI + */ +bool ice_vf_adq_vsi_valid(struct ice_vf *vf, u8 tc) +{ + return (vf->ch[tc].vsi_idx && vf->ch[tc].vsi_idx != ICE_NO_VSI && + vf->ch[tc].vsi_num != ICE_NO_VSI); +} + +/** + * ice_vf_adq_vsi_release - release VF ADQ VSI resources + * @vf: VF that ADQ VSI is being released on + * @tc: TC used to access channel specific VSI + * + * This function stops Tx and Rx queues if specified, disables Tx queues if + * specified, releases VSI resources, and invalidates it + * + */ +static void ice_vf_adq_vsi_release(struct ice_vf *vf, u8 tc) +{ + ice_vsi_release(ice_get_vf_adq_vsi(vf, tc)); + ice_vf_adq_invalidate_vsi(vf, tc); +} + +/** + * ice_vf_adq_cfg_cleanup - invalidate the VF's channel software info + * @vf: VF that ADQ VSI is being released on + * @tc: TC used to access channel specific VSI + * + * This function invalidates software data structures specific to channel + * such as num_qps, tx_rate, etc... 
This is called from places like:
+ * when the ADQ VSI is released, either from the rebuild path
+ * "ice_vf_adq_release" or while rebuilding the ADQ config after failing to
+ * create/setup the VF ADQ VSIs
+ */
+static void ice_vf_adq_cfg_cleanup(struct ice_vf *vf, u8 tc)
+{
+	vf->ch[tc].num_qps = 0;
+	vf->ch[tc].offset = 0;
+	vf->ch[tc].max_tx_rate = 0;
+}
+
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+/**
+ * ice_del_all_adv_switch_fltr
+ * @vf: pointer to the VF info
+ *
+ * This function deletes all advanced switch filters specific to the VF,
+ * releases filter memory and updates all book-keeping. It is to be used
+ * when a delete channel message is received, before deleting the channel
+ * VSIs.
+ */
+void ice_del_all_adv_switch_fltr(struct ice_vf *vf)
+{
+	struct ice_rule_query_data rule;
+	struct ice_tc_flower_fltr *f;
+	struct ice_pf *pf = vf->pf;
+	struct hlist_node *node;
+	struct device *dev;
+	int err;
+
+	dev = ice_pf_to_dev(pf);
+	hlist_for_each_entry_safe(f, node, &vf->tc_flower_fltr_list,
+				  tc_flower_node) {
+		if (!f->dest_vsi)
+			continue;
+
+		/* Deleting TC filter */
+		rule.rid = f->rid;
+		rule.rule_id = f->rule_id;
+		rule.vsi_handle = f->dest_vsi_handle;
+		err = ice_rem_adv_rule_by_id(&pf->hw, &rule);
+		if (err) {
+			if (err == -ENOENT)
+				dev_dbg(dev, "VF %d: filter (rule_id %u) for dest VSI %u DOES NOT EXIST in hw table\n",
+					vf->vf_id, f->rule_id,
+					f->dest_vsi_handle);
+			else
+				dev_err(dev, "VF %d: Failed to delete switch filter for VSI handle %u, err %d\n",
+					vf->vf_id, f->dest_vsi_handle, err);
+		}
+
+		/* book-keeping and update filter type if filter count
+		 * reached zero
+		 */
+		f->dest_vsi->num_chnl_fltr--;
+		hlist_del(&f->tc_flower_node);
+		devm_kfree(dev, f);
+		vf->num_dmac_chnl_fltrs--;
+	}
+}
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
+
+/**
+ * ice_vf_adq_release - perform VF ADQ resource cleanup only
+ * @vf: pointer to the VF structure
+ *
+ * Delete all VF ADQ filters, release VF ADQ VSIs, and clean up the internal
+ * data structures which keep track of per TC info, including TC0. This
+ * function is invoked only during a VFLR-based VF reset.
+ */
+void ice_vf_adq_release(struct ice_vf *vf)
+{
+	u8 tc;
+
+	/* no ADQ configured, nothing to do */
+	if (!ice_is_vf_adq_ena(vf))
+		return;
+
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+	/* release VF ADQ specific filters and eventually VF driver
+	 * will trigger replay of VF ADQ filters as needed, just like
+	 * other MAC, VLAN filters
+	 */
+	ice_del_all_adv_switch_fltr(vf);
+#endif
+
+	for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) {
+		if (!ice_vf_adq_vsi_valid(vf, tc))
+			continue;
+		/* Tx queues are disabled before VF reset is scheduled as part
+		 * of the VFLR flow. Disabling Tx queues again causes errors
+		 * such as EINVAL from the admin command because the underlying
+		 * scheduler configs are already cleared by the first disable
+		 */
+		if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
+			ice_vf_adq_vsi_stop_rings(vf, tc);
+		ice_vf_adq_vsi_release(vf, tc);
+		/* clear per TC info to avoid stale information such as
+		 * num_qps, tx_rate, etc...
+ */ + ice_vf_adq_cfg_cleanup(vf, tc); + } + + /* to avoid rebuilding of VF ADQ VSIs by mistake */ + vf->adq_enabled = false; + vf->num_tc = 0; + + /* main VF VSI should be built with default, hence clear related + * data structures otherwise vf->ch[0].num_qps and tx_rate will + * still have stale information as stored from "add channel" + * virtchnl message + */ + ice_vf_adq_cfg_cleanup(vf, 0); +} + +/** + * ice_vf_adq_vsi_setup - Set up a VF channel VSI + * @vf: VF to setup VSI for + * @tc: TC to setup the channel VSI for + */ +static struct ice_vsi *ice_vf_adq_vsi_setup(struct ice_vf *vf, u8 tc) +{ + struct ice_port_info *pi = ice_vf_get_port_info(vf); + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL, tc); + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "Failed to create VF ADQ VSI for TC %d\n", + tc); + ice_vf_adq_invalidate_vsi(vf, tc); + return NULL; + } + + vf->ch[tc].vsi_idx = vsi->idx; + vf->ch[tc].vsi_num = vsi->vsi_num; + + return vsi; +} + +/** + * ice_vf_rebuild_adq_port_vlan_cfg - set the port VLAN for VF ADQ VSIs + * @vf: VF to add MAC filters for + * + * Called after a VF ADQ VSI has been re-added/rebuilt during reset. + */ +static int ice_vf_rebuild_adq_port_vlan_cfg(struct ice_vf *vf) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + int err, tc; + + for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { + struct ice_vsi *vsi; + + if (!ice_vf_adq_vsi_valid(vf, tc)) + continue; + + vsi = ice_get_vf_adq_vsi(vf, tc); + err = ice_vf_rebuild_host_vlan_cfg(vf, vsi); + if (err) { + ice_dev_err_errno(dev, err, + "failed to configure port VLAN via VSI parameters for VF %u, ADQ VSI(num %u)", + vf->vf_id, vsi->vsi_num); + return err; + } + } + return 0; +} + +/** + * ice_vf_rebuild_adq_spoofchk_cfg - set the spoofchk config for VF ADQ VSIs + * @vf: VF to set spoofchk for + * + * Called after a VF ADQ VSI has been re-added/rebuilt during reset. + */ +static int ice_vf_rebuild_adq_spoofchk_cfg(struct ice_vf *vf) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + int err, tc; + + for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { + struct ice_vsi *vsi; + + if (!ice_vf_adq_vsi_valid(vf, tc)) + continue; + + vsi = ice_get_vf_adq_vsi(vf, tc); + err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk); + if (err) { + ice_dev_err_errno(dev, err, + "failed to configure spoofchk via VSI parameters for VF %u, ADQ VSI(num %u)", + vf->vf_id, vsi->vsi_num); + return err; + } + } + return 0; +} + +/** + * ice_vf_rebuild_adq_aggregator_node - move ADQ VSIs into aggregator node + * @vf: VF to rebuild ADQ VSI(s) Tx rate configuration on + * + * If VF ADQ is enabled, replay scheduler aggregator node config + */ +static void ice_vf_rebuild_adq_aggregator_node(struct ice_vf *vf) +{ + int tc; + + if (!ice_is_vf_adq_ena(vf)) + return; + + for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { + struct ice_vsi *vsi; + + if (!ice_vf_adq_vsi_valid(vf, tc)) + continue; + vsi = ice_get_vf_adq_vsi(vf, tc); + ice_vf_rebuild_aggregator_node_cfg(vsi); + } +} + +/** + * ice_vf_rebuild_adq_tx_rate_cfg - rebuild ADQ VSI(s) Tx rate configuration + * @vf: VF to rebuild ADQ VSI(s) Tx rate configuration on + */ +static void ice_vf_rebuild_adq_tx_rate_cfg(struct ice_vf *vf) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi; + u64 max_tx_rate; + u8 tc; + + if (!ice_is_vf_adq_ena(vf)) + return; + + /* Host may have set Tx rate for VF, but use the TC0's specified + * max Tx rate for main VF VSI. 
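+	 * For example (illustrative numbers only): if the host set a
+	 * 1000 Mbps limit on the VF but the VF's channel config requested
+	 * 500 Mbps for TC0, the 500 Mbps value from ch[0] is what gets
+	 * programmed for the main VSI below.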
+ * Iterate thru' all VSI (hence for loop starts with zero) shared by + * given VF and set the BW limit if specified as part of + * VF ADQ TC config + */ + for (tc = 0; tc < vf->num_tc; tc++) { + if (!ice_vf_adq_vsi_valid(vf, tc)) + continue; + + max_tx_rate = vf->ch[tc].max_tx_rate; + if (!max_tx_rate) + continue; + + if (!tc && vf->max_tx_rate) + dev_dbg(dev, "Host managed VF rate limit %u for VF %d are being changed to %llu\n", + vf->max_tx_rate, vf->vf_id, max_tx_rate); + + vsi = ice_get_vf_adq_vsi(vf, tc); + if (ice_set_max_bw_limit(vsi, max_tx_rate * 1000)) + dev_err(dev, "Unable to set Tx rate %llu in Mbps for VF %u TC %d\n", + max_tx_rate, vf->vf_id, tc); + } +} + +/** + * ice_vf_rebuild_adq_host_cfg - host admin config is persistent across reset + * @vf: VF to rebuild ADQ host configuration on + */ +void ice_vf_rebuild_adq_host_cfg(struct ice_vf *vf) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + + ice_vf_rebuild_adq_aggregator_node(vf); + ice_vf_rebuild_adq_tx_rate_cfg(vf); + if (ice_vf_rebuild_adq_port_vlan_cfg(vf)) + dev_err(dev, "failed to rebuild port VLAN configuration for ADQ enabled VF %u\n", + vf->vf_id); + if (ice_vf_rebuild_adq_spoofchk_cfg(vf)) + dev_err(dev, "failed to rebuild spoofchk configuration for ADQ enabled VF %u\n", + vf->vf_id); +} + +/** + * ice_vf_recreate_adq_vsi - release and recreate each ADQ VSI + * @vf: VF to re-apply ADQ configuration for + * + * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF + * configuration change, etc.). + * + * This cannot be called for the reset all VFs case as ice_vf_adq_vsi_release() + * will fail because there are no VF VSI(s) in firmware at this point. + */ +int ice_vf_recreate_adq_vsi(struct ice_vf *vf) +{ + u8 tc; + + if (!ice_is_vf_adq_ena(vf)) + return 0; + + for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { + if (ice_vf_adq_vsi_valid(vf, tc)) { + ice_vf_adq_vsi_stop_rings(vf, tc); + ice_vf_adq_vsi_disable_txqs(vf, tc); + ice_vf_adq_vsi_release(vf, tc); + } + + if (!ice_vf_adq_vsi_setup(vf, tc)) { + dev_err(ice_pf_to_dev(vf->pf), "failed to setup ADQ VSI for VF %u, TC %d, disabling VF ADQ VSI\n", + vf->vf_id, tc); + goto adq_cfg_failed; + } + } + + /* must to store away TC0's info because it is used later */ + vf->ch[0].vsi_idx = vf->lan_vsi_idx; + vf->ch[0].vsi_num = vf->lan_vsi_num; + + return 0; + +adq_cfg_failed: + /* perform VSI release for ADQ VSI if some of them were + * created successfully. 
+ */ + for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { + if (ice_vf_adq_vsi_valid(vf, tc)) { + ice_vf_adq_vsi_disable_txqs(vf, tc); + ice_vf_adq_vsi_release(vf, tc); + } + ice_vf_adq_cfg_cleanup(vf, tc); + } + vf->adq_enabled = false; + vf->num_tc = 0; + /* Upon failure also clean up tc=0 specific info from + * software data structs, to avoid having stale info + */ + ice_vf_adq_invalidate_vsi(vf, 0); + ice_vf_adq_cfg_cleanup(vf, 0); + return -ENOMEM; +} + +/** + * ice_vf_rebuild_adq_vsi - rebuild ADQ VSI(s) on the VF + * @vf: VF to rebuild ADQ VSI(s) on + */ +int ice_vf_rebuild_adq_vsi(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + int tc; + + /* no ADQ configured, nothing to do */ + if (!ice_is_vf_adq_ena(vf)) + return 0; + + for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { + struct ice_vsi *vsi; + int ret; + + if (!ice_vf_adq_vsi_valid(vf, tc)) + continue; + + vsi = ice_get_vf_adq_vsi(vf, tc); + ret = ice_vsi_rebuild(vsi, true); + if (ret) { + dev_err(ice_pf_to_dev(pf), "failed to rebuild ADQ VSI for VF %u, disabling VF ADQ VSI\n", + vf->vf_id); + vf->adq_enabled = false; + ice_vf_adq_invalidate_vsi(vf, tc); + return ret; + } + + vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); + vf->ch[tc].vsi_num = vsi->vsi_num; + vf->ch[tc].vsi_idx = vsi->idx; + } + + /* must to store away TC0's info because it is use later */ + vf->ch[0].vsi_idx = vf->lan_vsi_idx; + vf->ch[0].vsi_num = vf->lan_vsi_num; + + return 0; +} + +/** + * ice_vf_get_tc_based_qid - get the updated QID based on offset + * @qid: queue ID + * @offset : TC specific queue offset + * + * This function returns updated queueID based on offset. This is + * meant to be used only with VF ADQ. Queue ID will always be + * 0-based from the specified offset + */ +u16 ice_vf_get_tc_based_qid(u16 qid, u16 offset) +{ + return (qid >= offset) ? (qid - offset) : qid; +} + +/** + * ice_vf_q_id_get_vsi_q_id + * @vf: pointer to the VF info + * @vf_q_id: VF relative queue ID + * @t_tc: traffic class for indexing the VSIs + * @vqs: the VFs virtual queue selection + * @vsi_p: pointer to VSI pointer, which changes based on TC for ADQ + * @vsi_id: VSI ID specific to desired queue ID + * @q_id: queue ID of the VSI + * + * provides ADQ queue enablement support by mapping the VF queue ID and TC to + * VSI ID and queue ID. call while iterating through VF queue IDs, VF VSIs and + * TCs. 
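+ *
+ * For example (hypothetical channel layout): with ICE_VF_CHNL_START_TC == 1
+ * and vf->ch[1].offset == 4, VF queue IDs 0-3 stay on the main VSI with
+ * q_id unchanged, while queue IDs 4 and up fall into TC 1's channel VSI,
+ * e.g. VF queue ID 6 maps to q_id 2 (6 - 4) on that VSI.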
+ */ +void ice_vf_q_id_get_vsi_q_id(struct ice_vf *vf, u16 vf_q_id, u16 *t_tc, + struct virtchnl_queue_select *vqs, + struct ice_vsi **vsi_p, u16 *vsi_id, + u16 *q_id) +{ + struct ice_vsi *vsi = *vsi_p; + u32 max_chnl_tc; + u16 tc = *t_tc; + + max_chnl_tc = ice_vc_get_max_chnl_tc_allowed(vf); + + /* Update the VSI and TC based on per TC queue region and offset */ + if (tc + 1U < max_chnl_tc && vf_q_id == vf->ch[tc + 1].offset && + tc < vf->num_tc && ice_is_vf_adq_ena(vf)) { + vsi = vf->pf->vsi[vf->ch[tc + 1].vsi_idx]; + tc++; + } + + /* Update vsi_id and queue_id based on TC if TC is VF ADQ TC, then + * use VF ADQ VSI otherwise main VF VSI + */ + if (tc >= ICE_VF_CHNL_START_TC && ice_is_vf_adq_ena(vf)) { + *vsi_id = vsi->vsi_num; + *q_id = ice_vf_get_tc_based_qid(vf_q_id, vf->ch[tc].offset); + } else { + *vsi_id = vqs->vsi_id; + *q_id = vf_q_id; + } + + *vsi_p = vsi; + *t_tc = tc; +} + +#ifdef HAVE_TC_SETUP_CLSFLOWER +/** + * ice_validate_cloud_filter + * @vf: pointer to the VF info + * @tc_filter: pointer to virtchnl_filter + * + * This function validates cloud filter programmed as TC filter for ADQ + */ +static int +ice_validate_cloud_filter(struct ice_vf *vf, struct virtchnl_filter *tc_filter) +{ + struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; + struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; + struct ice_pf *pf = vf->pf; + struct device *dev; + + dev = ice_pf_to_dev(pf); + if (!tc_filter->action) { + dev_err(dev, "VF %d: Currently ADQ doesn't support Drop Action\n", + vf->vf_id); + return -EOPNOTSUPP; + } + + /* Check filter if it's programmed for advanced mode or basic mode. + * There are two ADQ modes (for VF only), + * 1. Basic mode: intended to allow as many filter options as possible + * to be added to a VF in Non-trusted mode. Main goal is + * to add filters to its own MAC and VLAN ID. + * 2. Advanced mode: is for allowing filters to be applied other than + * its own MAC or VLAN. This mode requires the VF to be + * Trusted. 
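+	 * For example, an untrusted VF may add a filter that matches its own
+	 * default MAC address, while a filter keyed only on an arbitrary
+	 * destination IP + port (one of the advanced combinations) is
+	 * rejected below unless the VF is trusted.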
+ */ + if (mask.dst_mac[0] && !mask.dst_ip[0]) { + /* As of now supporting, MAC filter if MAC address is the + * default LAN addr for this VF + */ + if (!ice_mac_fltr_exist(&pf->hw, data.dst_mac, + vf->lan_vsi_idx)) { + dev_err(dev, "Destination MAC %pM doesn't belong to VF %d\n", + data.dst_mac, vf->vf_id); + return -EINVAL; + } + } else if (!ice_is_vf_trusted(vf)) { + /* Check if VF is trusted */ + dev_err(dev, "VF %d not trusted, make VF trusted to add ADQ filters\n", + vf->vf_id); + return -EOPNOTSUPP; + } + + if (mask.dst_mac[0] & data.dst_mac[0]) { + if (is_broadcast_ether_addr(data.dst_mac) || + is_zero_ether_addr(data.dst_mac)) { + dev_err(dev, "VF %d: Invalid Dest MAC addr %pM\n", + vf->vf_id, data.dst_mac); + return -EINVAL; + } + } + + if (mask.src_mac[0] & data.src_mac[0]) { + if (is_broadcast_ether_addr(data.src_mac) || + is_zero_ether_addr(data.src_mac)) { + dev_err(dev, "VF %d: Invalid Source MAC addr %pM\n", + vf->vf_id, data.src_mac); + return -EINVAL; + } + } + + if (mask.dst_port & data.dst_port) { + if (!data.dst_port) { + dev_err(dev, "VF %d: Invalid Dest port\n", vf->vf_id); + return -EINVAL; + } + } + + if (mask.src_port & data.src_port) { + if (!data.src_port) { + dev_err(dev, "VF %d: Invalid Source port\n", vf->vf_id); + return -EINVAL; + } + } + + if (mask.vlan_id & data.vlan_id) { + if (ntohs(data.vlan_id) >= VLAN_N_VID) { + dev_err(dev, "VF %d: invalid VLAN ID\n", vf->vf_id); + return -EINVAL; + } + /* Validate VLAN for the VF the same way we do for the PF */ + if (!ice_vlan_fltr_exist(&pf->hw, ntohs(data.vlan_id), + vf->lan_vsi_idx)) { + dev_err(dev, "specified VLAN %u doesn't belong to this VF %d\n", + ntohs(data.vlan_id), vf->vf_id); + return -EINVAL; + } + } + + return 0; +} + +/** + * ice_get_tc_flower_fltr - locate the TC flower filter + * @vf: pointer to the VF info + * @fltr: pointer to the tc_flower filter + * @mask: ptr to filter mask (representing filter data specification) + * + * This function is used to locate specific filter in filter list. 
It returns + * NULL if unable to locate such filter otherwise returns found filter + */ +static struct ice_tc_flower_fltr * +ice_get_tc_flower_fltr(struct ice_vf *vf, struct ice_tc_flower_fltr *fltr, + struct virtchnl_l4_spec *mask) +{ + struct ice_tc_flower_lyr_2_4_hdrs *hdrs; + struct ice_tc_l2_hdr *l2_key; + struct ice_tc_l3_hdr *l3_key; + struct ice_tc_l4_hdr *l4_key; + struct ice_tc_flower_fltr *f; + struct hlist_node *node; + + hdrs = &fltr->outer_headers; + if (!hdrs) + return NULL; + + l2_key = &hdrs->l2_key; + l3_key = &hdrs->l3_key; + l4_key = &hdrs->l4_key; + + hlist_for_each_entry_safe(f, node, + &vf->tc_flower_fltr_list, tc_flower_node) { + struct ice_tc_flower_lyr_2_4_hdrs *f_hdrs; + + if (!f->dest_vsi || fltr->dest_vsi != f->dest_vsi || + fltr->dest_vsi->idx != f->dest_vsi->idx) + continue; + + f_hdrs = &f->outer_headers; + + /* handle L2 fields if specified and do not match */ + if ((mask->src_mac[0] && + !ether_addr_equal(l2_key->src_mac, + f_hdrs->l2_key.src_mac)) || + (mask->dst_mac[0] && + !ether_addr_equal(l2_key->dst_mac, + f_hdrs->l2_key.dst_mac))) + continue; + + /* handle VLAN if specified and do not match */ + if (mask->vlan_id && hdrs->vlan_hdr.vlan_id != + f_hdrs->vlan_hdr.vlan_id) + continue; + + /* handle L3 IPv4 if specified and do not match + * for ipv4 data to be valid, check only first dword of mask + */ + if (l2_key->n_proto == htons(ETH_P_IP)) + if ((mask->dst_ip[0] && + l3_key->dst_ipv4 != f_hdrs->l3_key.dst_ipv4) || + (mask->src_ip[0] && + l3_key->src_ipv4 != f_hdrs->l3_key.src_ipv4)) + continue; + + /* handle L3 IPv6 if specified and do not match + * for ipv6 to be valid, last dword from mask must be valid + * hence check only last dword of mask + */ + if (l2_key->n_proto == htons(ETH_P_IPV6) && mask->dst_ip[3]) + if (memcmp(&l3_key->ip.v6.dst_ip6, + &f_hdrs->l3_key.ip.v6.dst_ip6, + sizeof(l3_key->ip.v6.dst_ip6))) + continue; + if (l2_key->n_proto == htons(ETH_P_IPV6) && mask->src_ip[3]) + if (memcmp(&l3_key->ip.v6.src_ip6, + &f_hdrs->l3_key.ip.v6.src_ip6, + sizeof(l3_key->ip.v6.src_ip6))) + continue; + + /* make sure "ip_proto" is same */ + if (l3_key->ip_proto != f_hdrs->l3_key.ip_proto) + continue; + + /* handle L4 fields if specified and do not match */ + if ((mask->dst_port && + l4_key->dst_port != f_hdrs->l4_key.dst_port) || + (mask->src_port && + l4_key->src_port != f_hdrs->l4_key.src_port)) + continue; + + /* if reached here, means found matching filter entry */ + return f; + } + + return NULL; +} + +/** + * ice_vc_chnl_fltr_state_verify - verify general state of VF + * @vf: pointer to the VF info + * @vcf: pointer to virtchannel_filter + * + * This function performs general validation including validation of filter + * message and content + */ +static enum virtchnl_status_code +ice_vc_chnl_fltr_state_verify(struct ice_vf *vf, struct virtchnl_filter *vcf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + u32 max_tc_allowed; + struct device *dev; + + dev = ice_pf_to_dev(pf); + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + return VIRTCHNL_STATUS_ERR_PARAM; + + if (!ice_is_vf_adq_ena(vf)) { + dev_err(dev, "VF %d: ADQ is not enabled, can't apply switch filter\n", + vf->vf_id); + return VIRTCHNL_STATUS_ERR_PARAM; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + dev_err(dev, "VF %d: No corresponding VF VSI\n", vf->vf_id); + return VIRTCHNL_STATUS_ERR_PARAM; + } + + max_tc_allowed = ice_vc_get_max_chnl_tc_allowed(vf); + if (vcf->action == VIRTCHNL_ACTION_TC_REDIRECT && + vcf->action_meta >= max_tc_allowed) { + dev_err(dev, "VF %d: 
Err: action(%u)_meta(TC): %u >= max_tc_allowed (%u)\n", + vf->vf_id, vcf->action, vcf->action_meta, + max_tc_allowed); + return VIRTCHNL_STATUS_ERR_PARAM; + } + + /* enforce supported flow_type based on negotiated capability */ + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2) { + if (!(vcf->flow_type == VIRTCHNL_TCP_V4_FLOW || + vcf->flow_type == VIRTCHNL_TCP_V6_FLOW || + vcf->flow_type == VIRTCHNL_UDP_V4_FLOW || + vcf->flow_type == VIRTCHNL_UDP_V6_FLOW)) { + dev_err(ice_pf_to_dev(pf), "VF %d: Invalid input/s, unsupported flow_type %u\n", + vf->vf_id, vcf->flow_type); + return VIRTCHNL_STATUS_ERR_PARAM; + } + } else { + if (!(vcf->flow_type == VIRTCHNL_TCP_V4_FLOW || + vcf->flow_type == VIRTCHNL_TCP_V6_FLOW)){ + dev_err(ice_pf_to_dev(pf), "VF %d: Invalid input/s, unsupported flow_type %u\n", + vf->vf_id, vcf->flow_type); + return VIRTCHNL_STATUS_ERR_PARAM; + } + } + + if (ice_validate_cloud_filter(vf, vcf)) { + dev_err(dev, "VF %d: Invalid input/s, can't apply switch filter\n", + vf->vf_id); + return VIRTCHNL_STATUS_ERR_PARAM; + } + + /* filter state fully verified, return SUCCESS */ + return VIRTCHNL_STATUS_SUCCESS; +} + +/** + * ice_setup_fltr - populate fields in TC flower filter structure + * @vf: ptr to VF + * @vcf: ptr to virt channel message + * @fltr: pointer to the TC filter structure + * @dest_vsi: pointer to destination VSI for filter + * @tc_class: TC number when action type to FWD_TO_VSI, counter index when + * action is count, queue number when action is FWD_TO_QUEUE, + * queue group ID when action is FWD_TO_QGRP + */ +static void +ice_setup_fltr(struct ice_vf *vf, struct ice_tc_flower_fltr *fltr, + struct virtchnl_filter *vcf, struct ice_vsi *dest_vsi, + int tc_class) +{ + struct virtchnl_l4_spec *mask = &vcf->mask.tcp_spec; + struct virtchnl_l4_spec *tcf = &vcf->data.tcp_spec; + struct ice_tc_flower_lyr_2_4_hdrs *hdrs; + + memset(fltr, 0, sizeof(*fltr)); + + hdrs = &fltr->outer_headers; + if (!hdrs) + return; + + /* copy L2 MAC address and MAC mask */ + ether_addr_copy(hdrs->l2_key.dst_mac, tcf->dst_mac); + ether_addr_copy(hdrs->l2_mask.dst_mac, mask->dst_mac); + if (!is_zero_ether_addr(hdrs->l2_key.dst_mac)) + fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC; + + /* copy L2 source address and MAC mask */ + ether_addr_copy(hdrs->l2_key.src_mac, tcf->src_mac); + ether_addr_copy(hdrs->l2_mask.src_mac, mask->src_mac); + if (!is_zero_ether_addr(hdrs->l2_key.src_mac)) + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC; + + /* copy VLAN info */ + hdrs->vlan_hdr.vlan_id = mask->vlan_id & tcf->vlan_id; + if (hdrs->vlan_hdr.vlan_id) + fltr->flags |= ICE_TC_FLWR_FIELD_VLAN; + + /* copy L4 fields */ + hdrs->l4_key.dst_port = mask->dst_port & tcf->dst_port; + hdrs->l4_mask.dst_port = mask->dst_port; + if (hdrs->l4_key.dst_port) + fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT; + + hdrs->l4_key.src_port = mask->src_port & tcf->src_port; + hdrs->l4_mask.src_port = mask->src_port; + if (hdrs->l4_key.src_port) + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT; + + /* copy L3 fields, IPv4[6] */ + if (vcf->flow_type == VIRTCHNL_TCP_V4_FLOW || + vcf->flow_type == VIRTCHNL_UDP_V4_FLOW) { + struct ice_tc_l3_hdr *key, *msk; + + key = &hdrs->l3_key; + msk = &hdrs->l3_mask; + + /* set n_proto based on flow_type */ + hdrs->l2_key.n_proto = htons(ETH_P_IP); + if (mask->dst_ip[0] & tcf->dst_ip[0]) { + key->dst_ipv4 = tcf->dst_ip[0]; + msk->dst_ipv4 = mask->dst_ip[0]; + fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4; + } + if (mask->src_ip[0] & tcf->src_ip[0]) { + key->src_ipv4 = tcf->src_ip[0]; + msk->src_ipv4 = 
mask->src_ip[0]; + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4; + } + } else if (vcf->flow_type == VIRTCHNL_TCP_V6_FLOW || + vcf->flow_type == VIRTCHNL_UDP_V6_FLOW) { + struct ice_tc_l3_hdr *key, *msk; + + key = &hdrs->l3_key; + msk = &hdrs->l3_mask; + + /* set n_proto based on flow_type */ + hdrs->l2_key.n_proto = htons(ETH_P_IPV6); + if (mask->dst_ip[3] & tcf->dst_ip[3]) { + memcpy(&key->ip.v6.dst_ip6, tcf->dst_ip, + sizeof(key->ip.v6.dst_ip6)); + memcpy(&msk->ip.v6.dst_ip6, mask->dst_ip, + sizeof(msk->ip.v6.dst_ip6)); + fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6; + } + if (mask->src_ip[3] & tcf->src_ip[3]) { + memcpy(&key->ip.v6.src_ip6, tcf->src_ip, + sizeof(key->ip.v6.src_ip6)); + memcpy(&msk->ip.v6.src_ip6, mask->src_ip, + sizeof(msk->ip.v6.src_ip6)); + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6; + } + } + + /* get the VSI to which the TC belongs to */ + fltr->dest_vsi = dest_vsi; + if (vcf->action == VIRTCHNL_ACTION_TC_REDIRECT) + fltr->action.fltr_act = ICE_FWD_TO_VSI; + else + fltr->action.fltr_act = ICE_DROP_PACKET; + + /* make sure to include VF's MAC address when adding ADQ filter */ + if ((!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) && + fltr->action.fltr_act == ICE_FWD_TO_VSI) { + fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC; + ether_addr_copy(hdrs->l2_key.dst_mac, vf->dev_lan_addr.addr); + eth_broadcast_addr(hdrs->l2_mask.dst_mac); + } + + /* 'tc_class' could be TC/QUEUE/QUEUE_GRP number */ + fltr->action.fwd.tc.tc_class = tc_class; + + /* must to set the tunnel_type to be INVALID, otherwise if left as zero, + * it gets treated as VxLAN tunnel since definition of VxLAN tunnel + * type is zero + */ + fltr->tunnel_type = TNL_LAST; + + /* set ip_proto in headers based on flow_type which is part of VIRTCHNL + * message, "add filter" + */ + if (vcf->flow_type == VIRTCHNL_TCP_V4_FLOW || + vcf->flow_type == VIRTCHNL_TCP_V6_FLOW) + hdrs->l3_key.ip_proto = IPPROTO_TCP; + else + hdrs->l3_key.ip_proto = IPPROTO_UDP; +} + +/** + * ice_vc_del_switch_filter + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * This function deletes a cloud filter programmed as TC filter for ADQ + */ +int ice_vc_del_switch_filter(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; + struct virtchnl_l4_spec *mask = &vcf->mask.tcp_spec; + struct ice_rule_query_data rule; + enum virtchnl_status_code v_ret; + struct ice_tc_flower_fltr fltr; + struct ice_tc_flower_fltr *f; + struct ice_pf *pf = vf->pf; + struct ice_vsi *dest_vsi; + struct device *dev; + int err; + + dev = ice_pf_to_dev(pf); + /* Advanced switch filters and DCF are mutually exclusive. */ + if (ice_is_dcf_enabled(pf)) { + dev_err(dev, "Device Control Functionality is currently enabled. 
Advanced switch filters cannot be deleted.\n");
+		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+		goto err;
+	}
+
+	v_ret = ice_vc_chnl_fltr_state_verify(vf, vcf);
+	if (v_ret) {
+		dev_err(dev, "VF %d: failed to verify ADQ state during filter message processing\n",
+			vf->vf_id);
+		goto err;
+	}
+
+	dest_vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
+
+	/* prepare the TC flower filter based on input */
+	ice_setup_fltr(vf, &fltr, vcf, dest_vsi, vcf->action_meta);
+
+	/* locate the filter in VF tc_flower filter list */
+	f = ice_get_tc_flower_fltr(vf, &fltr, mask);
+	if (!f) {
+		dev_err(dev, "VF %d: Invalid input/s, unable to locate filter due to mismatch\n",
+			vf->vf_id);
+		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+		goto err;
+	}
+
+	/* Deleting TC filter */
+	rule.rid = f->rid;
+	rule.rule_id = f->rule_id;
+	rule.vsi_handle = f->dest_vsi_handle;
+	err = ice_rem_adv_rule_by_id(&pf->hw, &rule);
+	if (err) {
+		dev_err(dev, "VF %d: Failed to delete switch filter for tc %u, err %d\n",
+			vf->vf_id, vcf->action_meta, err);
+		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+		goto err;
+	}
+
+	/* book-keeping and update filter type if filter count reached zero */
+	dest_vsi->num_chnl_fltr--;
+
+	hlist_del(&f->tc_flower_node);
+	/* read f->flags before freeing the filter memory */
+	if (f->flags & ICE_TC_FLWR_FIELD_DST_MAC)
+		vf->num_dmac_chnl_fltrs--;
+	devm_kfree(dev, f);
+	v_ret = VIRTCHNL_STATUS_SUCCESS;
+err:
+	/* send the response back to the VF */
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, v_ret,
+				     NULL, 0);
+}
+
+/**
+ * ice_vc_add_switch_filter
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * This function adds a switch filter programmed as TC filter for ADQ
+ *
+ * General info about filtering mode:
+ * VF ADQ has two different modes when it comes to applying the switch
+ * filters
+ * 1. basic mode: only dst MAC and dst VLAN filters supported
+ * 2. advanced mode: all combinations of filters including dst MAC and
+ *			dst VLAN ex:
+ *	a. dst IP + dst PORT
+ *	b. dst MAC + src PORT
+ *	c. dst MAC + dst PORT
+ * basic mode is for 'untrusted VFs' and advanced mode is only for
+ * 'trusted VFs'. When a VF is toggled from being 'trusted' to
+ * 'untrusted' we remove all filters irrespective of whether they are
+ * basic or advanced.
+ * when ADQ is enabled we need to do ice_down irrespective of whether the
+ * VF is 'trusted' or not and delete switch filters only if a 'trusted'
+ * VF is made 'untrusted'.
+ */
+int ice_vc_add_switch_filter(struct ice_vf *vf, u8 *msg)
+{
+	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
+	struct ice_tc_flower_fltr *fltr = NULL;
+	enum virtchnl_status_code v_ret;
+	struct ice_vsi *dest_vsi, *vsi;
+	struct ice_pf *pf = vf->pf;
+	struct device *dev;
+	int ret;
+
+	dev = ice_pf_to_dev(pf);
+	/* Advanced switch filters and DCF are mutually exclusive. */
+	if (ice_is_dcf_enabled(pf)) {
+		dev_err(dev, "Device Control Functionality is currently enabled. 
Advanced switch filters cannot be added\n"); + v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + goto err; + } + + v_ret = ice_vc_chnl_fltr_state_verify(vf, vcf); + if (v_ret) { + dev_err(dev, "VF %d: failed to verify ADQ state during filter message processing\n", + vf->vf_id); + goto err; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + dev_err(dev, "VF %d: No corresponding VF VSI\n", vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + dest_vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; + + fltr = devm_kzalloc(dev, sizeof(*fltr), GFP_KERNEL); + if (!fltr) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + goto err; + } + + /* prepare the TC flower filter based on input */ + ice_setup_fltr(vf, fltr, vcf, dest_vsi, vcf->action_meta); + + /* call function which adds advanced switch filter */ + ret = ice_add_tc_flower_adv_fltr(vsi, fltr); + if (ret) { + dev_err(dev, "Failed to add TC Flower filter using advance filter recipe\n"); + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + devm_kfree(dev, fltr); + goto err; + } + + INIT_HLIST_NODE(&fltr->tc_flower_node); + hlist_add_head(&fltr->tc_flower_node, &vf->tc_flower_fltr_list); + if (fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC) + vf->num_dmac_chnl_fltrs++; + + v_ret = VIRTCHNL_STATUS_SUCCESS; + vf->adq_fltr_ena = true; + +err: + /* send the response back to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, v_ret, + NULL, 0); +} + +/** + * ice_conv_virtchnl_speed_to_mbps + * @virt_speed: virt speed that needs to be converted from + * + * convert virt channel speeds to mbps, return link speed on success, + * '0' otherwise + */ +static u32 ice_conv_virtchnl_speed_to_mbps(u16 virt_speed) +{ + u32 speed, link_speed; + + speed = ice_conv_link_speed_to_virtchnl(false, virt_speed); + + /* get link speed in MB to validate rate limit */ + switch (speed) { + case VIRTCHNL_LINK_SPEED_100MB: + link_speed = SPEED_100; + break; + case VIRTCHNL_LINK_SPEED_1GB: + link_speed = SPEED_1000; + break; + case VIRTCHNL_LINK_SPEED_10GB: + link_speed = SPEED_10000; + break; + case VIRTCHNL_LINK_SPEED_20GB: + link_speed = SPEED_20000; + break; + case VIRTCHNL_LINK_SPEED_25GB: + link_speed = SPEED_25000; + break; + case VIRTCHNL_LINK_SPEED_40GB: + link_speed = SPEED_40000; + break; + default: + /* on failure to detect link speed the expectation of the caller + * to this function is '0'. 
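+		 * For example, VIRTCHNL_LINK_SPEED_25GB resolves to
+		 * SPEED_25000 (25000 Mbps) above, while an unrecognized value
+		 * falls through to this default and yields 0.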
+ */ + link_speed = 0; + break; + } + + return link_speed; +} + +/** + * ice_vc_add_qch_msg: Add queue channel and enable ADQ + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + */ +int ice_vc_add_qch_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_tc_info *tci = + (struct virtchnl_tc_info *)msg; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct ice_pf *pf = vf->pf; + int adq_request_qps = 0; + struct ice_link_status *ls; + u16 available_vsis = 0; + u64 total_max_rate = 0; + u32 max_tc_allowed; + struct device *dev; + u16 total_qs = 0; + u32 link_speed; + unsigned int i; + + dev = ice_pf_to_dev(pf); + ls = &pf->hw.port_info->phy.link_info; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + /* check if VF has negotiated this capability before anything else */ + if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { + dev_dbg(dev, "VF %d attempting to enable ADQ, but hasn't properly negotiated that capability\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + /* Currently ADQ and DCB are mutually exclusive and keeping in sync + * with PF, don't allow VF ADQ configuration when DCB Firmware LLDP + * agent is already running/enabled. + */ + if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) { + dev_err(dev, "FW LLDP is enabled, cannot enable ADQ on VF %d\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + /* VF ADQ and DCF are mutually exclusive. */ + if (ice_is_dcf_enabled(pf)) { + dev_err(dev, "Device Control Functionality is currently enabled. VF ADQ cannot be enabled\n"); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + /* ADQ cannot be applied if spoof check is ON */ + if (vf->spoofchk) { + dev_err(dev, "Spoof check is ON, turn it OFF to enable ADQ\n"); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + ice_for_each_vsi(pf, i) { + if (!pf->vsi[i]) + ++available_vsis; + } + + if (available_vsis < tci->num_tc - 1) { + dev_err(dev, "Not enough VSIs left to enable ADQ on VF %d\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + max_tc_allowed = ice_vc_get_max_chnl_tc_allowed(vf); + /* max number of traffic classes for VF currently capped at 4 for legacy + * ADQ and 16 for ADQ V2. 
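+	 * (Illustratively, a request for num_tc = 5 from a VF that only
+	 * negotiated legacy ADQ exceeds the cap of 4 and is rejected by the
+	 * range check below.)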
+ */ + if (!tci->num_tc || tci->num_tc > max_tc_allowed) { + dev_dbg(dev, "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n", + vf->vf_id, tci->num_tc, max_tc_allowed); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + /* validate queues for each TC */ + for (i = 0; i < tci->num_tc; i++) { + if (!tci->list[i].count) { + dev_err(dev, "VF %d: TC %d trying to set %u queues, should be > 0 per TC\n", + vf->vf_id, i, tci->list[i].count); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + total_qs += tci->list[i].count; + } + + if (total_qs > ICE_MAX_DFLT_QS_PER_VF) { + dev_err(dev, "VF %d: Total number of queues of all TCs cannot exceed %u\n", + vf->vf_id, ICE_MAX_DFLT_QS_PER_VF); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + /* Speed in Mbps */ + if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) + link_speed = ice_conv_link_speed_to_virtchnl(true, + ls->link_speed); + else + link_speed = ice_conv_virtchnl_speed_to_mbps(ls->link_speed); + + if (!link_speed) { + dev_err(dev, "Cannot detect link speed on VF %d\n", vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + for (i = 0; i < tci->num_tc; i++) + if (tci->list[i].max_tx_rate) + total_max_rate += tci->list[i].max_tx_rate; + + if (total_max_rate > link_speed) { + dev_err(dev, "Invalid tx rate specified for ADQ on VF %d\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (vf->max_tx_rate && total_max_rate > vf->max_tx_rate) { + dev_err(dev, "Invalid tx rate specified for ADQ on VF %d, total_max_rate %llu Mpbs > host set max_tx_rate %u Mbps\n", + vf->vf_id, total_max_rate, vf->max_tx_rate); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + /* need Max VF queues but already have default number of queues */ + adq_request_qps = ICE_MAX_DFLT_QS_PER_VF - pf->vfs.num_qps_per; + + if (ice_get_avail_txq_count(pf) < adq_request_qps) { + dev_err(dev, "No queues left to allocate to VF %d\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + goto err; + } else { + /* we need to allocate max VF queues to enable ADQ so as to + * make sure ADQ enabled VF always gets back queues when it + * goes through a reset. + */ + vf->num_vf_qs = ICE_MAX_DFLT_QS_PER_VF; + } + + /* parse data from the queue channel info */ + vf->num_tc = tci->num_tc; + + for (i = 0; i < vf->num_tc; i++) { + if (tci->list[i].max_tx_rate) + vf->ch[i].max_tx_rate = tci->list[i].max_tx_rate; + + vf->ch[i].num_qps = tci->list[i].count; + vf->ch[i].offset = tci->list[i].offset; + } + + /* set this flag only after making sure all inputs are sane */ + vf->adq_enabled = true; + /* initialize filter enable flag, set it only if filters are applied */ + vf->adq_fltr_ena = false; + + /* reset the VF in order to allocate resources. Don't reset if ADQ_V2 + * capability is negotiated, since in that case AVF driver will request + * for a reset. 
+ */ + if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2)) + ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); + + /* send the response to the VF */ +err: + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2) + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, + v_ret, (u8 *)tci, sizeof(*tci)); + else + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, + v_ret, NULL, 0); +} + +/** + * ice_vc_del_qch_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * delete the additional VSIs which are created as part of ADQ + */ +int ice_vc_del_qch_msg(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + struct device *dev; + u8 tc; + + dev = ice_pf_to_dev(pf); + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + /* VF ADQ and DCF are mutually exclusive. */ + if (ice_is_dcf_enabled(pf)) { + dev_err(dev, "Device Control Functionality is currently enabled. VF ADQ cannot be enabled\n"); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (ice_is_vf_adq_ena(vf)) { + /* if ADQ_V2 is set, perform inline cleanup of ADQ resources and + * return success and eventually VF driver will initiate reset + * as per design + */ + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2) { + dev_info(ice_pf_to_dev(pf), + "Deleting Queue Channels for ADQ on VF %d and ADQ_V2 is set\n", + vf->vf_id); + + /* release VF ADQ filters and VSIs inline */ + ice_vf_adq_release(vf); + v_ret = VIRTCHNL_STATUS_SUCCESS; + goto err; + } + +#ifdef HAVE_TC_SETUP_CLSFLOWER + /* delete all ADQ filters for given VF */ + ice_del_all_adv_switch_fltr(vf); +#endif /* HAVE_TC_SETUP_CLSFLOWER */ + + /* stop all Tx/Rx rings and clean them before deleting the ADQ + * resources, if not it will throw fail to set the LAN Tx queue + * context error. This is needed irrespective of ADQ_V2. Channel + * related TC starts at 1. Don't down the VSI and related + * resources for TC 0 because it is primary VF VSI and downing + * that VSI is handled somewhere else. + */ + for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { + vsi = ice_get_vf_adq_vsi(vf, tc); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (vf->ch[tc].vsi_num) { + set_bit(ICE_VSI_DOWN, vsi->state); + ice_down(vsi); + } + } + + /* this order of code is very important, if num_tc is not + * cleared, VF again rebuilds as ADQ enabled clearly contrary + * to what we're trying to do. Also clearing num_tc before + * deleting ADQ filters leads to the condition where the code + * will try to delete filters when none are configured. + */ + vf->num_tc = 0; + dev_info(ice_pf_to_dev(pf), "Deleting Queue Channels for ADQ on VF %d\n", + vf->vf_id); + + /* reset needs to happen first, before we clear the adq_enabled + * flag, since freeing up of ADQ resources happens based off of + * this flag in reset path. Doing a reset after clearing the + * flag will leave the ADQ resources in zombie state which in + * turn creates undesired problems such as system lock up, stack + * trace etc., + * Also we shouldn't be doing a reset if ADQ flag is cleared in + * some other place, hence sending the failure response back to + * the VF. 
+ */ + ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); + if (ice_is_vf_link_up(vf)) { + /* bring the VSI 0 back up again */ + vsi = ice_get_vf_adq_vsi(vf, 0); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + ice_up(vsi); + } + + vf->adq_enabled = false; + } else { + dev_info(dev, "VF %d trying to delete queue channels but ADQ isn't enabled\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } + + /* send the response to the VF */ +err: + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2) + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, + v_ret, msg, + sizeof(struct virtchnl_tc_info)); + else + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, + v_ret, NULL, 0); +} + +/** + * ice_vf_adq_total_max_tx_rate - cummulative max_tx_rate when VF ADQ is enabled + * @vf: Pointer to VF + * + * This function cummulative max Tx rate of all TCs if VF ADQ is enabled + */ +u64 ice_vf_adq_total_max_tx_rate(struct ice_vf *vf) +{ + u64 cummulative_max_tx_rate = 0; + int i; + + if (!ice_is_vf_adq_ena(vf)) + return 0; + + for (i = 0; i < vf->num_tc; i++) + cummulative_max_tx_rate += vf->ch[i].max_tx_rate; + + return cummulative_max_tx_rate; +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ diff --git a/drivers/thirdparty/ice/ice_vf_adq.h b/drivers/thirdparty/ice/ice_vf_adq.h new file mode 100644 index 000000000000..92593982bbec --- /dev/null +++ b/drivers/thirdparty/ice/ice_vf_adq.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_VF_ADQ_H_ +#define _ICE_VF_ADQ_H_ + +struct ice_vsi *ice_get_vf_adq_vsi(struct ice_vf *vf, u8 tc); +bool ice_is_vf_adq_ena(struct ice_vf *vf); +bool ice_vf_adq_vsi_valid(struct ice_vf *vf, u8 tc); +void ice_del_all_adv_switch_fltr(struct ice_vf *vf); +void ice_vf_adq_release(struct ice_vf *vf); +void ice_vf_rebuild_adq_host_cfg(struct ice_vf *vf); +int ice_vf_recreate_adq_vsi(struct ice_vf *vf); +int ice_vf_rebuild_adq_vsi(struct ice_vf *vf); +u16 ice_vf_get_tc_based_qid(u16 qid, u16 offset); +void ice_vf_q_id_get_vsi_q_id(struct ice_vf *vf, u16 vf_q_id, u16 *t_tc, + struct virtchnl_queue_select *vqs, + struct ice_vsi **vsi_p, u16 *vsi_id, + u16 *q_id); +int ice_vc_del_switch_filter(struct ice_vf *vf, u8 *msg); +int ice_vc_add_switch_filter(struct ice_vf *vf, u8 *msg); +int ice_vc_add_qch_msg(struct ice_vf *vf, u8 *msg); +int ice_vc_del_qch_msg(struct ice_vf *vf, u8 *msg); +u64 ice_vf_adq_total_max_tx_rate(struct ice_vf *vf); + +#endif /* _ICE_VF_ADQ_H_ */ diff --git a/drivers/thirdparty/ice/ice_vf_lib.c b/drivers/thirdparty/ice/ice_vf_lib.c new file mode 100644 index 000000000000..3fec748f6778 --- /dev/null +++ b/drivers/thirdparty/ice/ice_vf_lib.c @@ -0,0 +1,1525 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#include "ice_vf_lib_private.h" +#include "ice.h" +#include "ice_lib.h" +#include "ice_fltr.h" +#include "ice_vf_vsi_vlan_ops.h" +#include "ice_virtchnl_allowlist.h" +#include "ice_vf_adq.h" + +/* Public functions which may be accessed by core driver files */ + +/** + * ice_get_vf_by_id - Get pointer to VF by ID + * @pf: the PF private structure + * @vf_id: the VF ID to locate + * + * Locate and return a pointer to the VF structure associated with a given ID. + * Returns NULL if the ID does not have a valid VF structure associated with + * it. + * + * Note that valid vf_id values cannot exceed U16_MAX. However, some code + * flows receive, store, and report VF IDs using a u32 field. 
This is done so + * that those flows can use the upper bits for special values such as the + * broadcast VF ID. The u32 parameter prevents implicit truncation which would + * cause the driver to misinterpret these special values. + * + * This function takes a reference on the VF which must be released by calling + * ice_put_vf() when the caller has finished accessing the VF structure. + */ +struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u32 vf_id) +{ + struct ice_vf *vf; + + /* All valid VF IDs are 16 bits */ + if (vf_id > U16_MAX) { + dev_err(ice_pf_to_dev(pf), "Out of range VF ID: %u\n", + vf_id); + return NULL; + } + + rcu_read_lock(); + hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) { + if (vf->vf_id == vf_id) { + struct ice_vf *found; + + if (kref_get_unless_zero(&vf->refcnt)) + found = vf; + else + found = NULL; + + rcu_read_unlock(); + return found; + } + } + + rcu_read_unlock(); + return NULL; +} + +/** + * ice_release_vf - Release VF associated with a refcount + * @ref: the kref decremented to zero + * + * Callback function for kref_put to release a VF once its reference count has + * hit zero. + */ +static void ice_release_vf(struct kref *ref) +{ + struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt); + + vf->vf_ops->free(vf); +} + +/** + * ice_put_vf - Release a reference to a VF + * @vf: the VF structure to decrease reference count on + * + * Decrease the reference count for a VF, and free the entry if it is no + * longer in use. + * + * This must be called after ice_get_vf_by_id() once the reference to the VF + * structure is no longer used. Otherwise, the VF structure will never be + * freed. + */ +void ice_put_vf(struct ice_vf *vf) +{ + kref_put(&vf->refcnt, ice_release_vf); +} + +/** + * ice_is_valid_vf_id - helper to check if VF ID is valid + * @pf: pointer to the PF structure + * @vf_id: the ID of the VF to check + * + * Note that valid vf_id values cannot exceed U16_MAX. However, some code + * flows receive, store, and report VF IDs using a u32. This function takes + * a u32 in order to prevent implicit truncation of such values to a u16. + * + * Note that this function only guarantees that the VF ID is valid at the + * point of calling it. It does not guarantee the VF ID will remain valid + * after it exits. It is possible that the VF will be released at any time + * after this function exits. + */ +bool ice_is_valid_vf_id(struct ice_pf *pf, u32 vf_id) +{ + struct ice_vf *vf; + + /* VF IDs are not necessarily contiguous. Validate that the VF ID + * exists in the VF hash table. + */ + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) { + dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id); + return false; + } + + ice_put_vf(vf); + return true; +} + +/** + * ice_has_vfs - Return true if the PF has any associated VFs + * @pf: the PF private structure + * + * Return whether or not the PF has any allocated VFs. + * + * Note that this function only guarantees that there are no VFs at the point + * of calling it. It does not guarantee that no more VFs will be added. + */ +bool ice_has_vfs(struct ice_pf *pf) +{ + /* A simple check that the hash table is not empty does not require + * the mutex or rcu_read_lock. + */ + return !hash_empty(pf->vfs.table); +} + +/** + * ice_get_num_vfs - Get number of allocated VFs + * @pf: the PF private structure + * + * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed + * to be contiguous. Do not assume that a VF ID is guaranteed to be less than + * the output of this function. 
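+ *
+ * (Illustration: with two allocated VFs whose IDs happen to be 0 and 5,
+ * this returns 2 even though VF ID 5 is not less than 2.)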
+ */
+u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ u16 num_vfs = 0;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ num_vfs++;
+ rcu_read_unlock();
+
+ return num_vfs;
+}
+
+/**
+ * ice_get_vf_vsi - get VF's VSI based on the stored index
+ * @vf: VF used to get VSI
+ */
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ if (vf->lan_vsi_idx == ICE_NO_VSI)
+ return NULL;
+
+ return vf->pf->vsi[vf->lan_vsi_idx];
+}
+
+/**
+ * ice_is_vf_disabled
+ * @vf: pointer to the VF info
+ *
+ * If the PF has been disabled, there is no need to reset the VF until the PF
+ * is active again. Similarly, if the VF has been disabled, this means
+ * something else is resetting the VF, so we shouldn't continue.
+ *
+ * Returns true if the caller should consider the VF as disabled whether
+ * because that single VF is explicitly disabled or because the PF is
+ * currently disabled.
+ */
+bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ return (test_bit(ICE_VF_DIS, pf->state) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states));
+}
+
+/**
+ * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
+ * @vf: The VF being reset
+ *
+ * The max poll time is about 800ms, which is about the maximum time it takes
+ * for a VF to be reset and/or a VF driver to be removed.
+ */
+static void ice_wait_on_vf_reset(struct ice_vf *vf)
+{
+ int i;
+
+ for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+ break;
+ msleep(ICE_MAX_VF_RESET_SLEEP_MS);
+ }
+}
+
+/**
+ * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
+ * @vf: VF to check if it's ready to be configured/queried
+ *
+ * The purpose of this function is to make sure the VF is not in reset, not
+ * disabled, and initialized so it can be configured and/or queried by a host
+ * administrator.
+ */
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ ice_wait_on_vf_reset(vf);
+
+ if (ice_is_vf_disabled(vf))
+ return -EINVAL;
+
+ if (ice_check_vf_init(vf))
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * ice_trigger_vf_reset - Reset a VF on HW
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ * @is_pfr: true if the reset was triggered due to a previous PFR
+ *
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
+ */
+static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
+{
+ /* Inform VF that it is no longer active, as a warning */
+ clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+ /* Disable VF's configuration API during reset. The flag is re-enabled
+ * when it's safe again to access VF's VSI.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+ * needs to clear them in the case of VFR/VFLR. If this is done for
+ * PFR, it can mess up VF resets because the VF driver may already
+ * have started cleanup by the time we get here.
+ */
+ if (!is_pfr)
+ vf->vf_ops->clear_mbx_register(vf);
+
+ vf->vf_ops->trigger_reset_register(vf, is_vflr);
+}
+
+static void ice_vf_clear_counters(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (vsi)
+ vsi->num_vlan = 0;
+
+ vf->num_mac = 0;
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+}
+
+/**
+ * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
+ * @vf: VF to perform pre VSI rebuild tasks
+ *
+ * These tasks are items that don't need to be amortized since they are most
+ * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
+ */
+static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
+{
+ /* Close any IRQ mapping now */
+ if (vf->vf_ops->irq_close)
+ vf->vf_ops->irq_close(vf);
+
+ /* Remove switch rules associated with the reset VF */
+ ice_rm_dcf_sw_vsi_rule(vf->pf, vf->lan_vsi_num);
+
+ if (ice_is_vf_dcf(vf)) {
+ if (vf->pf->hw.dcf_caps & DCF_ACL_CAP)
+ ice_acl_destroy_tbl(&vf->pf->hw);
+ ice_clear_dcf_udp_tunnel_cfg(vf->pf);
+ }
+
+ ice_vf_clear_counters(vf);
+ vf->vf_ops->clear_reset_trigger(vf);
+}
+
+/**
+ * ice_vf_recreate_vsi - Release and re-create the VF's VSI
+ * @vf: VF to recreate the VSI for
+ *
+ * This is only called when a single VF is being reset (i.e. VFR, VFLR, host
+ * VF configuration change, etc.)
+ *
+ * It releases and then re-creates the VF's VSI.
+ */
+static int ice_vf_recreate_vsi(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ int err;
+
+ ice_vf_vsi_release(vf);
+
+ err = vf->vf_ops->create_vsi(vf);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to recreate the VF%u's VSI, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ ice_vf_recreate_adq_vsi(vf);
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_vsi - rebuild the VF's VSI
+ * @vf: VF to rebuild the VSI for
+ *
+ * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
+ * host, PFR, CORER, etc.).
+ *
+ * It reprograms the VSI configuration back into hardware.
+ */
+static int ice_vf_rebuild_vsi(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_pf *pf = vf->pf;
+
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ if (ice_vsi_rebuild(vsi, true)) {
+ dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
+ vf->vf_id);
+ return -EIO;
+ }
+ /* vsi->idx will remain the same in this case so don't update
+ * vf->lan_vsi_idx
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ if (ice_vf_rebuild_adq_vsi(vf)) {
+ dev_err(ice_pf_to_dev(pf), "failed to rebuild ADQ configuration for VF %d\n",
+ vf->vf_id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
+ * @vf: the VF being reset
+ *
+ * Perform reset tasks which must occur after the VSI has been re-created or
+ * rebuilt during a VF reset.
+ */ +static void ice_vf_post_vsi_rebuild(struct ice_vf *vf) +{ + ice_vf_rebuild_host_cfg(vf); + ice_vf_set_initialized(vf); + + vf->vf_ops->post_vsi_rebuild(vf); +} + +/** + * ice_is_any_vf_in_unicast_promisc - check if any VF(s) + * are in unicast promiscuous mode + * @pf: PF structure for accessing VF(s) + * + * Return false if no VF(s) are in unicast promiscuous mode, + * else return true + */ +bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf) +{ + bool is_vf_promisc = false; + struct ice_vf *vf; + unsigned int bkt; + + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { + /* found a VF that has promiscuous mode configured */ + if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) { + is_vf_promisc = true; + break; + } + } + rcu_read_unlock(); + + return is_vf_promisc; +} + +/** + * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes + * @vf: the VF pointer + * @vsi: the VSI to configure + * @ucast_m: promiscuous mask to apply to unicast + * @mcast_m: promiscuous mask to apply to multicast + * + * Decide which mask should be used for unicast and multicast filter, + * based on presence of VLANs + */ +void ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi, + u8 *ucast_m, u8 *mcast_m) +{ + if (ice_vf_is_port_vlan_ena(vf) || + ice_vsi_has_non_zero_vlans(vsi)) { + *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS; + *ucast_m = ICE_VF_UCAST_VLAN_PROMISC_BITS; + } else { + *mcast_m = ICE_MCAST_PROMISC_BITS; + *ucast_m = ICE_VF_UCAST_PROMISC_BITS; + } +} + +/** + * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI + * @vf: the VF pointer + * @vsi: the VSI to configure + * + * Clear all promiscuous/allmulticast filters for a VF + */ +static int ice_vf_clear_all_promisc_modes(struct ice_vf *vf, + struct ice_vsi *vsi) +{ + struct ice_pf *pf = vf->pf; + u8 ucast_m, mcast_m; + int ret = 0; + + ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m); + if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) { + if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) + ret = ice_clear_dflt_vsi(vsi); + else + ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m); + + if (ret) { + dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n"); + } else { + clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); + dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n"); + } + } + + if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { + ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m); + if (ret) { + dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n"); + } else { + clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); + dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n"); + } + } + return ret; +} + +/** + * ice_vf_set_vsi_promisc - Enable promiscuous traffic for a VF VSI + * @vf: the VF pointer + * @vsi: the VSI to configure + * @promisc_m: the promiscuous mask to apply + * + * Enable promiscuous traffic to the VF VSI for the provided traffic types in + * the promisc_m mask. 
+ */ +int +ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) +{ + struct ice_hw *hw = &vsi->back->hw; + u8 lport = vsi->port_info->lport; + int status; + + if (ice_vf_is_port_vlan_ena(vf)) + status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, + ice_vf_get_port_vlan_id(vf), + lport); + else if (ice_vsi_has_non_zero_vlans(vsi)) + status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m); + else + status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0, + lport); + + if (status && status != -EEXIST) { + dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", + vf->vf_id, status); + return status; + } + + return 0; +} + +/** + * ice_vf_clear_vsi_promisc - Disable promiscuous traffic for a VF VSI + * @vf: the VF pointer + * @vsi: the VSI to configure + * @promisc_m: the promiscuous mask to apply + * + * Disable promiscuous traffic to the VF VSI for the provided traffic types in + * the promisc_m mask. + */ +int +ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) +{ + struct ice_hw *hw = &vsi->back->hw; + u8 lport = vsi->port_info->lport; + int status; + + if (ice_vf_is_port_vlan_ena(vf)) + status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, + ice_vf_get_port_vlan_id(vf), + lport); + else if (ice_vsi_has_non_zero_vlans(vsi)) + status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m); + else + status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0, + lport); + + if (status && status != -ENOENT) { + dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", + vf->vf_id, status); + return status; + } + + return 0; +} + +/** + * ice_reset_all_vfs - reset all allocated VFs in one go + * @pf: pointer to the PF structure + * + * Reset all VFs at once, in response to a PF or other device reset. + * + * First, tell the hardware to reset each VF, then do all the waiting in one + * chunk, and finally finish restoring each VF after the wait. This is useful + * during PF routines which need to reset all VFs, as otherwise it must perform + * these resets in a serialized fashion. + */ +void ice_reset_all_vfs(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + struct ice_vf *vf; + unsigned int bkt; + + /* If we don't have any VFs, then there is nothing to reset */ + if (!ice_has_vfs(pf)) + return; + + mutex_lock(&pf->vfs.table_lock); + + /* clear all malicious info if the VFs are getting reset */ + ice_for_each_vf(pf, bkt, vf) + if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs, + ICE_MAX_SRIOV_VFS, vf->vf_id)) + dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", + vf->vf_id); + + /* If VFs have been disabled, there is no need to reset */ + if (test_and_set_bit(ICE_VF_DIS, pf->state)) { + mutex_unlock(&pf->vfs.table_lock); + return; + } + + ice_clear_dcf_acl_cfg(pf); + ice_clear_dcf_udp_tunnel_cfg(pf); + pf->hw.dcf_caps &= ~(DCF_ACL_CAP | DCF_UDP_TUNNEL_CAP); + + /* Begin reset on all VFs at once */ + ice_for_each_vf(pf, bkt, vf) + ice_trigger_vf_reset(vf, true, true); + + /* HW requires some time to make sure it can flush the FIFO for a VF + * when it resets it. Now that we've triggered all of the VFs, iterate + * the table again and wait for each VF to complete. + */ + ice_for_each_vf(pf, bkt, vf) { + if (!vf->vf_ops->poll_reset_status(vf)) { + /* Display a warning if at least one VF didn't manage + * to reset in time, but continue on with the + * operation. 
+ */
+ dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
+ break;
+ }
+ }
+
+ /* free VF resources to begin resetting the VSI state */
+ ice_for_each_vf(pf, bkt, vf) {
+ mutex_lock(&vf->cfg_lock);
+
+ vf->driver_caps = 0;
+ ice_vc_set_default_allowlist(vf);
+
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+ /* always release VF ADQ filters since those filters will be
+ * replayed by VF driver. This is needed to avoid stale
+ * filters in software internal data structures
+ */
+ ice_del_all_adv_switch_fltr(vf);
+#endif
+
+ ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
+ /* clean VF control VSI when resetting VFs since it should be
+ * setup only when iAVF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_invalidate_vsi(vf);
+
+ ice_vf_fsub_exit(vf);
+ ice_vf_fsub_init(vf);
+
+ ice_vf_pre_vsi_rebuild(vf);
+ ice_vf_rebuild_vsi(vf);
+ ice_vf_post_vsi_rebuild(vf);
+
+ mutex_unlock(&vf->cfg_lock);
+ }
+
+ if (ice_is_eswitch_mode_switchdev(pf))
+ if (ice_eswitch_rebuild(pf))
+ dev_warn(dev, "eswitch rebuild failed\n");
+
+ ice_flush(hw);
+ clear_bit(ICE_VF_DIS, pf->state);
+
+ mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_notify_vf_reset - Notify VF of a reset event
+ * @vf: pointer to the VF structure
+ */
+static void ice_notify_vf_reset(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ struct virtchnl_pf_event pfe;
+
+ /* Bail out if the VF is in a disabled state, or is neither initialized
+ * nor active - otherwise proceed with notifications
+ */
+ if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
+ NULL);
+}
+
+/**
+ * ice_reset_vf - Reset a particular VF
+ * @vf: pointer to the VF structure
+ * @flags: flags controlling behavior of the reset
+ *
+ * Flags:
+ * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
+ * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
+ * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
+ *
+ * Returns 0 if the VF is currently in reset, if resets are disabled, or if
+ * the VF resets successfully. Returns an error code if the VF fails to
+ * rebuild.
+ */ +int ice_reset_vf(struct ice_vf *vf, u32 flags) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + struct device *dev; + struct ice_hw *hw; + int err = 0; + bool rsd; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + + if (flags & ICE_VF_RESET_NOTIFY) + ice_notify_vf_reset(vf); + + if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { + dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n", + vf->vf_id); + return 0; + } + + if (ice_is_vf_disabled(vf)) { + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + dev_dbg(dev, "VF %d is already removed\n", vf->vf_id); + return -EINVAL; + } + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); + ice_vsi_stop_all_rx_rings(vsi); + dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", + vf->vf_id); + return 0; + } + + if (flags & ICE_VF_RESET_LOCK) + mutex_lock(&vf->cfg_lock); + else + lockdep_assert_held(&vf->cfg_lock); + + /* Set VF disable bit state here, before triggering reset */ + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + ice_send_vf_reset_to_aux(ice_find_cdev_info_by_id(pf, IIDC_RDMA_ID), + ice_abs_vf_id(hw, vf->vf_id)); + ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false); + + if (ice_dcf_get_state(pf) == ICE_DCF_STATE_ON) + ice_dcf_set_state(pf, ICE_DCF_STATE_BUSY); + + vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) { + err = -EIO; + goto out_unlock; + } + + ice_dis_vf_qs(vf); + + /* Call Disable LAN Tx queue AQ whether or not queues are + * enabled. This is needed for successful completion of VFR. + */ + ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, + NULL, vf->vf_ops->reset_type, vf->vf_id, NULL); + + /* Likewise Disable LAN Tx queues for VF ADQ VSIs */ + if (ice_is_vf_adq_ena(vf)) { + int tc; + + for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { + if (!ice_vf_adq_vsi_valid(vf, tc)) + continue; + ice_dis_vsi_txq(vsi->port_info, vf->ch[tc].vsi_idx, 0, + 0, NULL, NULL, NULL, + vf->vf_ops->reset_type, + vf->vf_id, NULL); + } + } + + if (vf->driver_caps & VIRTCHNL_VF_CAP_RDMA) + vf->vf_ops->clear_rdma_irq_map(vf); + + /* poll VPGEN_VFRSTAT reg to make sure + * that reset is complete + */ + rsd = vf->vf_ops->poll_reset_status(vf); + + /* Display a warning if VF didn't manage to reset in time, but need to + * continue on with the operation. + */ + if (!rsd) + dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id); + + vf->driver_caps = 0; + ice_vc_set_default_allowlist(vf); + + /* disable promiscuous modes in case they were enabled + * ignore any error if disabling process failed + */ + ice_vf_clear_all_promisc_modes(vf, vsi); + +#ifdef HAVE_TC_SETUP_CLSFLOWER + /* always release VF ADQ filters since those filters will be + * replayed by VF driver. This is needed to avoid stale filters in + * software internal data structures + */ + ice_del_all_adv_switch_fltr(vf); +#endif + /* VF driver gets reloaded on VFLR, so clear ADQ configuration */ + if (flags & ICE_VF_RESET_VFLR) + ice_vf_adq_release(vf); + + ice_eswitch_del_vf_mac_rule(vf); + + ice_vf_fdir_exit(vf); + ice_vf_fdir_init(vf); + + /* clean VF control VSI when resetting VF since it should be setup + * only when iAVF creates its first FDIR rule. 
+ */ + if (vf->ctrl_vsi_idx != ICE_NO_VSI) + ice_vf_ctrl_vsi_release(vf); + + ice_vf_fsub_exit(vf); + ice_vf_fsub_init(vf); + + ice_vf_pre_vsi_rebuild(vf); + + if (ice_vf_recreate_vsi(vf)) { + dev_err(dev, "Failed to release and setup the VF%u's VSI\n", + vf->vf_id); + err = -EFAULT; + goto out_unlock; + } + + ice_vf_post_vsi_rebuild(vf); + vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) { + err = -EINVAL; + goto out_unlock; + } + + ice_eswitch_update_repr(vsi); + ice_eswitch_replay_vf_mac_rule(vf); + + if (ice_dcf_get_state(pf) == ICE_DCF_STATE_BUSY) { + struct virtchnl_pf_event pfe = { 0 }; + + ice_dcf_set_state(pf, ICE_DCF_STATE_PAUSE); + + pfe.event = VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE; + pfe.event_data.vf_vsi_map.vf_id = vf->vf_id; + pfe.event_data.vf_vsi_map.vsi_id = vf->lan_vsi_num; + + ice_aq_send_msg_to_vf(&pf->hw, ICE_DCF_VFID, + VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)&pfe, sizeof(pfe), NULL); + } + + /* if the VF has been reset allow it to come up again */ + if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs, + ICE_MAX_SRIOV_VFS, vf->vf_id)) + dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", + vf->vf_id); + +out_unlock: + if (flags & ICE_VF_RESET_LOCK) + mutex_unlock(&vf->cfg_lock); + + return err; +} + +/** + * ice_set_vf_state_qs_dis - Set VF queues state to disabled + * @vf: pointer to the VF structure + */ +void ice_set_vf_state_qs_dis(struct ice_vf *vf) +{ + /* Clear Rx/Tx enabled queues flag */ + bitmap_zero(vf->txq_ena, ICE_MAX_QS_PER_VF); + bitmap_zero(vf->rxq_ena, ICE_MAX_QS_PER_VF); + clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); +} + +/* Private functions only accessed from other virtualization files */ + +static void +ice_vf_hash_ctx_init(struct ice_vf *vf) +{ + memset(&vf->hash_ctx, 0, sizeof(vf->hash_ctx)); +} + +/** + * ice_initialize_vf_entry - Initialize a VF entry + * @vf: pointer to the VF structure + * + * Returns 0 on success or an integer error code on failure. + */ +int ice_initialize_vf_entry(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vfs *vfs; + + vfs = &pf->vfs; + + /* assign default capabilities */ + vf->spoofchk = true; + vf->num_vf_qs = vfs->num_qps_per; + ice_vc_set_default_allowlist(vf); + ice_virtchnl_set_dflt_ops(vf); + + /* ctrl_vsi_idx will be set to a valid value only when iAVF + * creates its first fdir rule. 
+ */ + ice_vf_ctrl_invalidate_vsi(vf); + ice_vf_fdir_init(vf); + + ice_vf_hash_ctx_init(vf); + + ice_vf_fsub_init(vf); + + mutex_init(&vf->cfg_lock); + + return 0; +} + +/** + * ice_dis_vf_qs - Disable the VF queues + * @vf: pointer to the VF structure + */ +void ice_dis_vf_qs(struct ice_vf *vf) +{ + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + + if (WARN_ON(!vsi)) + return; + + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); + ice_vsi_stop_all_rx_rings(vsi); + /* Likewise if VF ADQ is enabled, stop Tx and Rx rings of VF ADQ VSI */ + if (ice_is_vf_adq_ena(vf)) { + int tc; + + for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { + if (!ice_vf_adq_vsi_valid(vf, tc)) + continue; + vsi = ice_get_vf_adq_vsi(vf, tc); + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); + ice_vsi_stop_all_rx_rings(vsi); + } + } + ice_set_vf_state_qs_dis(vf); +} + +/** + * ice_err_to_virt_err - translate errors for VF return code + * @err: error return code + */ +enum virtchnl_status_code ice_err_to_virt_err(int err) +{ + switch (err) { + case 0: + return VIRTCHNL_STATUS_SUCCESS; + case -EINVAL: + case -ENODEV: + return VIRTCHNL_STATUS_ERR_PARAM; + case -ENOMEM: + return VIRTCHNL_STATUS_ERR_NO_MEMORY; + case -EALREADY: + case -EBUSY: + case -EIO: + case -ENOSPC: + return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + default: + return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + } +} + +/** + * ice_check_vf_init - helper to check if VF init complete + * @vf: the pointer to the VF to check + */ +int ice_check_vf_init(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n", + vf->vf_id); + return -EBUSY; + } + return 0; +} + +/** + * ice_vf_get_port_info - Get the VF's port info structure + * @vf: VF used to get the port info structure for + */ +struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf) +{ + return vf->pf->hw.port_info; +} + +/** + * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior + * @vsi: the VSI to configure + * @enable: whether to enable or disable the spoof checking + * + * Configure a VSI to enable (or disable) spoof checking behavior. + */ +static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable) +{ + struct ice_vsi_ctx *ctx; + int err; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->info.sec_flags = vsi->info.sec_flags; + ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + + if (enable) + ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; + else + ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; + + err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL); + if (err) + dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n", + enable ? "ON" : "OFF", vsi->vsi_num, err); + else + vsi->info.sec_flags = ctx->info.sec_flags; + + kfree(ctx); + + return err; +} + +/** + * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI + * @vsi: VSI to enable Tx spoof checking for + * + * This also enables Tx filtering of the VLANs for this VSI. 
+ */ +static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi) +{ + struct ice_vsi_vlan_ops *vlan_ops; + int err; + + vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); + + err = vlan_ops->ena_tx_filtering(vsi); + if (err) + return err; + + return ice_cfg_mac_antispoof(vsi, true); +} + +/** + * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI + * @vsi: VSI to disable Tx spoof checking for + * + * This also disables Tx filtering of the VLANs for this VSI + */ +static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi) +{ + struct ice_vsi_vlan_ops *vlan_ops; + int err; + + vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); + + err = vlan_ops->dis_tx_filtering(vsi); + if (err) + return err; + + return ice_cfg_mac_antispoof(vsi, false); +} + +/** + * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI + * @vsi: VSI associated to the VF + * @enable: whether to enable or disable the spoof checking + */ +int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable) +{ + int err; + + if (enable) + err = ice_vsi_ena_spoofchk(vsi); + else + err = ice_vsi_dis_spoofchk(vsi); + + return err; +} + +/** + * ice_is_vf_trusted + * @vf: pointer to the VF info + */ +bool ice_is_vf_trusted(struct ice_vf *vf) +{ + return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); +} + +/** + * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled + * @vf: the VF to check + * + * Returns true if the VF has no Rx and no Tx queues enabled and returns false + * otherwise + */ +bool ice_vf_has_no_qs_ena(struct ice_vf *vf) +{ + return (!bitmap_weight(vf->rxq_ena, ICE_MAX_QS_PER_VF) && + !bitmap_weight(vf->txq_ena, ICE_MAX_QS_PER_VF)); +} + +/** + * ice_is_vf_link_up - check if the VF's link is up + * @vf: VF to check if link is up + */ +bool ice_is_vf_link_up(struct ice_vf *vf) +{ + struct ice_port_info *pi = ice_vf_get_port_info(vf); + struct ice_pf *pf = vf->pf; + + if (ice_check_vf_init(vf)) + return false; + + if (test_bit(ICE_BAD_EEPROM, pf->state)) + return false; + + if (ice_vf_has_no_qs_ena(vf)) + return false; + else if (vf->link_forced) + return vf->link_up; + else + return pi->phy.link_info.link_info & + ICE_AQ_LINK_UP; +} + +/** + * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value + * @vf: VF to configure trust setting for + */ +static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) +{ + if (vf->trusted) + set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + else + clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); +} + +/** + * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA + * @vf: VF to add MAC filters for + * + * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver + * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. 
+ */ +static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + u8 broadcast[ETH_ALEN]; + int status; + + if (WARN_ON(!vsi)) + return -EINVAL; + + if (ice_is_eswitch_mode_switchdev(vf->pf)) + return 0; + + eth_broadcast_addr(broadcast); + status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); + if (status) { + dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", + vf->vf_id, status); + return status; + } + + vf->num_mac++; + + if (is_valid_ether_addr(vf->hw_lan_addr.addr)) { + status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + if (status) { + dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", + &vf->hw_lan_addr.addr[0], vf->vf_id, + status); + return status; + } + vf->num_mac++; + + ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr); + } + + return 0; +} + +/** + * ice_vf_rebuild_dcf_vlan_cfg - Config DCF outer VLAN for VF + * @vf: VF to add outer VLAN for + * @vsi: Pointer to VSI + */ +static int ice_vf_rebuild_dcf_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) +{ + struct ice_dcf_vlan_info *dcf_vlan = &vf->dcf_vlan_info; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vlan *outer_vlan; + int err; + + if (!ice_is_dcf_enabled(vf->pf) || !dcf_vlan->applying) { + memset(dcf_vlan, 0, sizeof(*dcf_vlan)); + return 0; + } + + dcf_vlan->applying = 0; + + outer_vlan = &dcf_vlan->outer_port_vlan; + + if (outer_vlan->vid) { + err = ice_vf_vsi_dcf_set_outer_port_vlan(vsi, outer_vlan); + if (err) { + ice_dev_err_errno(dev, err, + "failed to configure outer port VLAN via DCF for VF %u", + vf->vf_id); + return err; + } + } + + if (dcf_vlan->outer_stripping_ena) { + u16 tpid = dcf_vlan->outer_stripping_tpid; + + err = ice_vf_vsi_dcf_ena_outer_vlan_stripping(vsi, tpid); + if (err) { + ice_dev_err_errno(dev, err, + "failed to enable outer VLAN stripping via DCF for VF %u", + vf->vf_id); + return err; + } + } + + return 0; +} + +/** + * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN + * @vf: VF to add MAC filters for + * @vsi: Pointer to VSI + * + * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver + * always re-adds either a VLAN 0 or port VLAN based filter after reset. + */ +int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) +{ + struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); + struct device *dev = ice_pf_to_dev(vf->pf); + int err; + + if (ice_vf_is_port_vlan_ena(vf)) { + err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info); + if (err) { + ice_dev_err_errno(dev, err, + "failed to configure port VLAN via VSI parameters for VF %u", + vf->vf_id); + return err; + } + + err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); + } else { + err = ice_vsi_add_vlan_zero(vsi); + } + + if (err) { + ice_dev_err_errno(dev, err, + "failed to add VLAN %u filter for VF %u during VF rebuild", + ice_vf_is_port_vlan_ena(vf) ? + ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id); + return err; + } + + err = vlan_ops->ena_rx_filtering(vsi); + if (err) { + ice_dev_err_errno(dev, err, + "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild", + vf->vf_id, vsi->idx); + } + + return 0; +} + +/** + * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration + * @vf: VF to re-apply the configuration for + * + * Called after a VF VSI has been re-added/rebuild during reset. 
The PF driver + * needs to re-apply the host configured Tx rate limiting configuration. + */ +static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + int err; + + if (WARN_ON(!vsi)) + return -EINVAL; + + if (vf->min_tx_rate) { + err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); + if (err) { + ice_dev_err_errno(dev, err, + "failed to set min Tx rate to %d Mbps for VF %u", + vf->min_tx_rate, vf->vf_id); + return err; + } + } + + if (vf->max_tx_rate) { + err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); + if (err) { + ice_dev_err_errno(dev, err, + "failed to set max Tx rate to %d Mbps for VF %u", + vf->max_tx_rate, vf->vf_id); + return err; + } + } + + return 0; +} + +/** + * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config + * @vsi: Pointer to VSI + * + * This function moves VSI into corresponding scheduler aggregator node + * based on cached value of "aggregator node info" per VSI + */ +void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct device *dev; + int status; + + if (!vsi->agg_node) + return; + + dev = ice_pf_to_dev(pf); + if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { + dev_dbg(dev, + "agg_id %u already has reached max_num_vsis %u\n", + vsi->agg_node->agg_id, vsi->agg_node->num_vsis); + return; + } + + status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, + vsi->idx, (u8)vsi->tc_cfg.ena_tc); + if (status) + dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node", + vsi->idx, vsi->agg_node->agg_id); + else + vsi->agg_node->num_vsis++; +} + +/** + * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset + * @vf: VF to rebuild host configuration on + */ +void ice_vf_rebuild_host_cfg(struct ice_vf *vf) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + + if (WARN_ON(!vsi)) + return; + + ice_vf_set_host_trust_cfg(vf); + + if (ice_vf_rebuild_host_mac_cfg(vf)) + dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n", + vf->vf_id); + + if (ice_vf_rebuild_dcf_vlan_cfg(vf, vsi)) + dev_err(dev, "failed to rebuild DCF VLAN configuration for VF %u\n", + vf->vf_id); + + if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) + dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", + vf->vf_id); + + if (ice_vf_rebuild_host_tx_rate_cfg(vf)) + dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", + vf->vf_id); + + if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk)) + dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n", + vf->vf_id); + + /* rebuild aggregator node config for main VF VSI */ + ice_vf_rebuild_aggregator_node_cfg(vsi); +} + +/** + * ice_vf_init_host_cfg - Initialize host admin configuration + * @vf: VF to initialize + * @vsi: the VSI created at initialization + * + * Initialize the VF host configuration. Called during VF creation to setup + * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It + * should only be called during VF creation. 
+ */ +int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi) +{ + struct ice_vsi_vlan_ops *vlan_ops; + struct ice_pf *pf = vf->pf; + u8 broadcast[ETH_ALEN]; + struct device *dev; + int err; + + dev = ice_pf_to_dev(pf); + + err = ice_vsi_add_vlan_zero(vsi); + if (err) { + dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n", + vf->vf_id); + return err; + } + + vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); + err = vlan_ops->ena_rx_filtering(vsi); + if (err) { + dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n", + vf->vf_id); + return err; + } + + eth_broadcast_addr(broadcast); + err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); + if (err) { + dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n", + vf->vf_id, err); + return err; + } + + vf->num_mac = 1; + + err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk); + if (err) { + dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n", + vf->vf_id); + return err; + } + + return 0; +} + +/** + * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access + * @vf: VF that control VSI is being invalidated on + */ +void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf) +{ + vf->ctrl_vsi_idx = ICE_NO_VSI; +} + +/** + * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it + * @vf: VF that control VSI is being released on + */ +void ice_vf_ctrl_vsi_release(struct ice_vf *vf) +{ + ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]); + ice_vf_ctrl_invalidate_vsi(vf); +} + +/** + * ice_vf_ctrl_vsi_setup - Set up a VF control VSI + * @vf: VF to setup control VSI for + * + * Returns pointer to the successfully allocated VSI struct on success, + * otherwise returns NULL on failure. + */ +struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) +{ + struct ice_port_info *pi = ice_vf_get_port_info(vf); + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL, 0); + + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n"); + ice_vf_ctrl_invalidate_vsi(vf); + } + + return vsi; +} + +/** + * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access + * @vf: VF to remove access to VSI for + */ +void ice_vf_invalidate_vsi(struct ice_vf *vf) +{ + vf->lan_vsi_idx = ICE_NO_VSI; + vf->lan_vsi_num = ICE_NO_VSI; +} + +/** + * ice_vf_vsi_release - Release the VF VSI and invalidate indexes + * @vf: pointer to the VF structure + * + * Release the VF associated with this VSI and then invalidate the VSI + * indexes. 
+ */ +void ice_vf_vsi_release(struct ice_vf *vf) +{ + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + + if (WARN_ON(!vsi)) + return; + + ice_vsi_release(vsi); + ice_vf_invalidate_vsi(vf); +} + +/** + * ice_vf_set_initialized - VF is ready for VIRTCHNL communication + * @vf: VF to set in initialized state + * + * After this function the VF will be ready to receive/handle the + * VIRTCHNL_OP_GET_VF_RESOURCES message + */ +void ice_vf_set_initialized(struct ice_vf *vf) +{ + ice_set_vf_state_qs_dis(vf); + clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); + clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); + clear_bit(ICE_VF_STATE_DIS, vf->vf_states); + set_bit(ICE_VF_STATE_INIT, vf->vf_states); + memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); +} diff --git a/drivers/thirdparty/ice/ice_vf_lib.h b/drivers/thirdparty/ice/ice_vf_lib.h new file mode 100644 index 000000000000..2f7aa6ebcb11 --- /dev/null +++ b/drivers/thirdparty/ice/ice_vf_lib.h @@ -0,0 +1,415 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_VF_LIB_H_ +#define _ICE_VF_LIB_H_ + +#include +#include +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_NET_DEVLINK) +#include +#endif /* CONFIG_NET_DEVLINK */ +#include "virtchnl.h" +#include "ice_type.h" +#include "ice_flow.h" +#include "ice_virtchnl_fdir.h" +#include "ice_virtchnl_fsub.h" +#include "ice_dcf.h" +#include "ice_vsi_vlan_ops.h" + +#define ICE_MAX_SRIOV_VFS 256 + +/* VF resource constraints */ +#define ICE_MAX_QS_PER_VF 256 + +struct ice_pf; +struct ice_vf; +struct ice_virtchnl_ops; + +/* VF capabilities */ +enum ice_virtchnl_cap { + ICE_VIRTCHNL_VF_CAP_PRIVILEGE = 0, +}; + +/* Specific VF states */ +enum ice_vf_states { + ICE_VF_STATE_INIT = 0, /* PF is initializing VF */ + ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */ + ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */ + ICE_VF_STATE_DIS, + ICE_VF_STATE_MC_PROMISC, + ICE_VF_STATE_UC_PROMISC, + ICE_VF_STATES_NBITS +}; + +struct ice_time_mac { + unsigned long time_modified; + u8 addr[ETH_ALEN]; +}; + +/* VF MDD events print structure */ +struct ice_mdd_vf_events { + u16 count; /* total count of Rx|Tx events */ + /* count number of the last printed event */ + u16 last_printed; +}; + +#define ICE_HASH_IP_CTX_IP 0 +#define ICE_HASH_IP_CTX_IP_ESP 1 +#define ICE_HASH_IP_CTX_IP_UDP_ESP 2 +#define ICE_HASH_IP_CTX_IP_AH 3 +#define ICE_HASH_IP_CTX_IP_L2TPV3 4 +#define ICE_HASH_IP_CTX_IP_PFCP 5 +#define ICE_HASH_IP_CTX_IP_UDP 6 +#define ICE_HASH_IP_CTX_IP_TCP 7 +#define ICE_HASH_IP_CTX_IP_SCTP 8 +#define ICE_HASH_IP_CTX_MAX 9 + +struct ice_vf_hash_ip_ctx { + struct ice_rss_hash_cfg ctx[ICE_HASH_IP_CTX_MAX]; +}; + +#define ICE_HASH_GTPU_CTX_EH_IP 0 +#define ICE_HASH_GTPU_CTX_EH_IP_UDP 1 +#define ICE_HASH_GTPU_CTX_EH_IP_TCP 2 +#define ICE_HASH_GTPU_CTX_UP_IP 3 +#define ICE_HASH_GTPU_CTX_UP_IP_UDP 4 +#define ICE_HASH_GTPU_CTX_UP_IP_TCP 5 +#define ICE_HASH_GTPU_CTX_DW_IP 6 +#define ICE_HASH_GTPU_CTX_DW_IP_UDP 7 +#define ICE_HASH_GTPU_CTX_DW_IP_TCP 8 +#define ICE_HASH_GTPU_CTX_MAX 9 + +struct ice_vf_hash_gtpu_ctx { + struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX]; +}; + +struct ice_vf_hash_ctx { + struct ice_vf_hash_ip_ctx v4; + struct ice_vf_hash_ip_ctx v6; + struct ice_vf_hash_gtpu_ctx ipv4; + struct ice_vf_hash_gtpu_ctx ipv6; +}; + +/* In ADQ, max 4 VSI's can be allocated per VF including primary VF VSI. + * These variables are used to store indices, ID's and number of queues + * for each VSI including that of primary VF VSI. 
Each Traffic class is + * termed as channel and each channel can in-turn have 4 queues which + * means max 16 queues overall per VF. + */ +struct ice_channel_vf { + u16 vsi_idx; /* index in PF struct for all channel VSIs */ + u16 vsi_num; /* HW (absolute) index of this VSI */ + u16 num_qps; /* number of queue pairs requested by user */ + u16 offset; + u64 max_tx_rate; /* Tx rate limiting for channels */ +}; + +/* The VF VLAN information controlled by DCF */ +struct ice_dcf_vlan_info { + struct ice_vlan outer_port_vlan; + u16 outer_stripping_tpid; + u8 outer_stripping_ena:1; + u8 applying:1; +}; + +/* Structure to store fdir fv entry */ +struct ice_fdir_prof_info { + struct ice_parser_profile prof; + u64 fdir_active_cnt; +}; + +/* Structure to store RSS field vector entry */ +struct ice_rss_prof_info { + struct ice_parser_profile prof; + bool symm; +}; + +/* VF operations */ +struct ice_vf_ops { + enum ice_disq_rst_src reset_type; + void (*free)(struct ice_vf *vf); + void (*clear_reset_state)(struct ice_vf *vf); + void (*clear_mbx_register)(struct ice_vf *vf); + void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr); + bool (*poll_reset_status)(struct ice_vf *vf); + void (*clear_reset_trigger)(struct ice_vf *vf); + void (*irq_close)(struct ice_vf *vf); + int (*create_vsi)(struct ice_vf *vf); + void (*post_vsi_rebuild)(struct ice_vf *vf); + struct ice_q_vector* (*get_q_vector)(struct ice_vf *vf, + struct ice_vsi *vsi, + u16 vector_id); + void (*cfg_rdma_irq_map)(struct ice_vf *vf, + struct virtchnl_rdma_qv_info *qv_info); + void (*clear_rdma_irq_map)(struct ice_vf *vf); +}; + +/* Virtchnl/SR-IOV config info */ +struct ice_vfs { + DECLARE_HASHTABLE(table, 8); /* table of VF entries */ + struct mutex table_lock; /* Lock for protecting the hash table */ + u16 num_supported; /* max supported VFs on this PF */ + u16 num_qps_per; /* number of queue pairs per VF */ + u16 num_msix_per; /* number of MSI-X vectors per VF */ + unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */ + DECLARE_BITMAP(malvfs, ICE_MAX_SRIOV_VFS); /* malicious VF indicator */ +}; + +struct ice_vf_qs_bw { + u16 queue_id; + u32 committed; + u32 peak; + u8 tc; +}; + +/* VF information structure */ +struct ice_vf { + struct hlist_node entry; + struct rcu_head rcu; + struct kref refcnt; + struct ice_pf *pf; + + /* Used during virtchnl message handling and NDO ops against the VF + * that will trigger a VFR + */ + struct mutex cfg_lock; + + u16 vf_id; /* VF ID in the PF space */ + u16 lan_vsi_idx; /* index into PF struct */ + u16 ctrl_vsi_idx; + struct ice_vf_fdir fdir; + struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS]; + struct ice_vf_fsub fsub; + struct ice_vf_hash_ctx hash_ctx; + struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS]; + struct ice_vf_qs_bw qs_bw[ICE_MAX_QS_PER_VF]; + /* first vector index of this VF in the PF space */ + int first_vector_idx; + struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ + struct virtchnl_version_info vf_ver; + u32 driver_caps; /* reported by VF driver */ + u16 stag; /* VF Port Extender (PE) stag if used */ + struct virtchnl_ether_addr dev_lan_addr; + struct virtchnl_ether_addr hw_lan_addr; + struct ice_time_mac legacy_last_added_umac; + DECLARE_BITMAP(txq_ena, ICE_MAX_QS_PER_VF); + DECLARE_BITMAP(rxq_ena, ICE_MAX_QS_PER_VF); + struct ice_vlan port_vlan_info; /* Port VLAN ID, QoS, and TPID */ + struct virtchnl_vlan_caps vlan_v2_caps; + struct ice_dcf_vlan_info dcf_vlan_info; + u8 pf_set_mac:1; /* VF MAC address set by VMM admin */ + u8 trusted:1; + 
u8 spoofchk:1; +#ifdef HAVE_NDO_SET_VF_LINK_STATE + u8 link_forced:1; + u8 link_up:1; /* only valid if VF link is forced */ +#endif + /* VSI indices - actual VSI pointers are maintained in the PF structure + * When assigned, these will be non-zero, because VSI 0 is always + * the main LAN VSI for the PF. + */ + u16 lan_vsi_num; /* ID as used by firmware */ + unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */ + unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */ + DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ + + unsigned long vf_caps; /* VF's adv. capabilities */ + u16 num_req_qs; /* num of queue pairs requested by VF */ + u16 num_mac; + u16 num_vf_qs; /* num of queue configured per VF */ + u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */ +#define ICE_INNER_VLAN_STRIP_ENA BIT(0) +#define ICE_OUTER_VLAN_STRIP_ENA BIT(1) + /* ADQ related variables */ + u8 adq_enabled; /* flag to enable ADQ */ + u8 adq_fltr_ena; /* flag to denote that ADQ filters are applied */ + u8 num_tc; + u16 num_dmac_chnl_fltrs; + struct ice_channel_vf ch[VIRTCHNL_MAX_ADQ_V2_CHANNELS]; + struct hlist_head tc_flower_fltr_list; + struct ice_mdd_vf_events mdd_rx_events; + struct ice_mdd_vf_events mdd_tx_events; + DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); + + struct ice_repr *repr; + const struct ice_virtchnl_ops *virtchnl_ops; + const struct ice_vf_ops *vf_ops; + +#if IS_ENABLED(CONFIG_NET_DEVLINK) + /* devlink port data */ + struct devlink_port devlink_port; +#endif /* CONFIG_NET_DEVLINK */ +}; + +/* Flags for controlling behavior of ice_reset_vf */ +enum ice_vf_reset_flags { + ICE_VF_RESET_VFLR = BIT(0), /* Indicate a VFLR reset */ + ICE_VF_RESET_NOTIFY = BIT(1), /* Notify VF prior to reset */ + ICE_VF_RESET_LOCK = BIT(2), /* Acquire the VF cfg_lock */ +}; + +static inline u16 ice_vf_get_port_vlan_id(struct ice_vf *vf) +{ + return vf->port_vlan_info.vid; +} + +static inline u8 ice_vf_get_port_vlan_prio(struct ice_vf *vf) +{ + return vf->port_vlan_info.prio; +} + +static inline bool ice_vf_is_port_vlan_ena(struct ice_vf *vf) +{ + return (ice_vf_get_port_vlan_id(vf) || ice_vf_get_port_vlan_prio(vf)); +} + +static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf) +{ + return vf->port_vlan_info.tpid; +} + +/* VF Hash Table access functions + * + * These functions provide abstraction for interacting with the VF hash table. + * In general, direct access to the hash table should be avoided outside of + * these functions where possible. + * + * The VF entries in the hash table are protected by reference counting to + * track lifetime of accesses from the table. The ice_get_vf_by_id() function + * obtains a reference to the VF structure which must be dropped by using + * ice_put_vf(). + */ + +/** + * ice_for_each_vf - Iterate over each VF entry + * @pf: pointer to the PF private structure + * @bkt: bucket index used for iteration + * @vf: pointer to the VF entry currently being processed in the loop + * + * The bkt variable is an unsigned integer iterator used to traverse the VF + * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is. + * Use vf->vf_id to get the id number if needed. + * + * The caller is expected to be under the table_lock mutex for the entire + * loop. Use this iterator if your loop is long or if it might sleep. 
+ */
+#define ice_for_each_vf(pf, bkt, vf) \
+ hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
+
+/**
+ * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under rcu_read_lock() for the entire loop.
+ * Only use this iterator if your loop is short and you can guarantee it does
+ * not sleep.
+ */
+#define ice_for_each_vf_rcu(pf, bkt, vf) \
+ hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)
+
+#ifdef CONFIG_PCI_IOV
+/* The vf_id parameter is a u32 in order to handle IDs stored as u32 values
+ * without implicit truncation.
+ */
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u32 vf_id);
+void ice_put_vf(struct ice_vf *vf);
+bool ice_is_valid_vf_id(struct ice_pf *pf, u32 vf_id);
+bool ice_has_vfs(struct ice_pf *pf);
+u16 ice_get_num_vfs(struct ice_pf *pf);
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
+bool ice_is_vf_disabled(struct ice_vf *vf);
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+void ice_set_vf_state_qs_dis(struct ice_vf *vf);
+bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
+void ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
+ u8 *ucast_m, u8 *mcast_m);
+int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
+int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
+int ice_reset_vf(struct ice_vf *vf, u32 flags);
+void ice_reset_all_vfs(struct ice_pf *pf);
+#else /* CONFIG_PCI_IOV */
+static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u32 vf_id)
+{
+ return NULL;
+}
+
+static inline void ice_put_vf(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_is_valid_vf_id(struct ice_pf *pf, u32 vf_id)
+{
+ return false;
+}
+
+static inline bool ice_has_vfs(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+ return 0;
+}
+
+static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ return NULL;
+}
+
+static inline bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ return true;
+}
+
+static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_reset_vf(struct ice_vf *vf, u32 flags)
+{
+ return 0;
+}
+
+static inline void ice_reset_all_vfs(struct ice_pf *pf)
+{
+}
+#endif /* !CONFIG_PCI_IOV */
+
+#endif /* _ICE_VF_LIB_H_ */
diff --git a/drivers/thirdparty/ice/ice_vf_lib_private.h b/drivers/thirdparty/ice/ice_vf_lib_private.h
new file mode 100644
index 000000000000..6c9af6233367
--- /dev/null
+++ b/drivers/thirdparty/ice/ice_vf_lib_private.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation.
*/ + +#ifndef _ICE_VF_LIB_PRIVATE_H_ +#define _ICE_VF_LIB_PRIVATE_H_ + +#include "ice_vf_lib.h" + +/* This header file is for exposing functions in ice_vf_lib.c to other files + * which are also conditionally compiled depending on CONFIG_PCI_IOV. + * Functions which may be used by other files should be exposed as part of + * ice_vf_lib.h + * + * Functions in this file are exposed only when CONFIG_PCI_IOV is enabled, and + * thus this header must not be included by .c files which may be compiled + * with CONFIG_PCI_IOV disabled. + * + * To avoid this, only include this header file directly within .c files that + * are conditionally enabled in the "ice-$(CONFIG_PCI_IOV)" block. + */ + +#ifndef CONFIG_PCI_IOV +#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files" +#endif + +int ice_initialize_vf_entry(struct ice_vf *vf); +void ice_dis_vf_qs(struct ice_vf *vf); +enum virtchnl_status_code ice_err_to_virt_err(int err); +int ice_check_vf_init(struct ice_vf *vf); +struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf); +int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable); +bool ice_is_vf_trusted(struct ice_vf *vf); +bool ice_vf_has_no_qs_ena(struct ice_vf *vf); +bool ice_is_vf_link_up(struct ice_vf *vf); +int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi); +void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi); +void ice_vf_rebuild_host_cfg(struct ice_vf *vf); +int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi); +void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf); +void ice_vf_ctrl_vsi_release(struct ice_vf *vf); +struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf); +void ice_vf_invalidate_vsi(struct ice_vf *vf); +void ice_vf_vsi_release(struct ice_vf *vf); +void ice_vf_set_initialized(struct ice_vf *vf); + +#endif /* _ICE_VF_LIB_PRIVATE_H_ */ diff --git a/drivers/thirdparty/ice/ice_vf_mbx.c b/drivers/thirdparty/ice/ice_vf_mbx.c new file mode 100644 index 000000000000..c0de86600814 --- /dev/null +++ b/drivers/thirdparty/ice/ice_vf_mbx.c @@ -0,0 +1,534 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#include "ice_common.h" +#include "ice_vf_mbx.h" + +/** + * ice_aq_send_msg_to_vf + * @hw: pointer to the hardware structure + * @vfid: VF ID to send msg + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cd: pointer to command details + * + * Send message to VF driver (0x0802) using mailbox + * queue and asynchronously sending message via + * ice_sq_send_cmd() function + */ +int +ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, + u8 *msg, u16 msglen, struct ice_sq_cd *cd) +{ + struct ice_aqc_pf_vf_msg *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf); + + cmd = &desc.params.virt; + cmd->id = cpu_to_le32(vfid); + + desc.cookie_high = cpu_to_le32(v_opcode); + desc.cookie_low = cpu_to_le32(v_retval); + + if (msglen) + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd); +} + +/** + * ice_conv_link_speed_to_virtchnl + * @adv_link_support: determines the format of the returned link speed + * @link_speed: variable containing the link_speed to be converted + * + * Convert link speed supported by HW to link speed supported by virtchnl. + * If adv_link_support is true, then return link speed in Mbps. 
Else return + * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller + * needs to cast back to an enum virtchnl_link_speed in the case where + * adv_link_support is false, but when adv_link_support is true the caller can + * expect the speed in Mbps. + */ +u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) +{ + u32 speed; + + if (adv_link_support) + switch (link_speed) { + case ICE_AQ_LINK_SPEED_10MB: + speed = SPEED_10; + break; + case ICE_AQ_LINK_SPEED_100MB: + speed = SPEED_100; + break; + case ICE_AQ_LINK_SPEED_1000MB: + speed = SPEED_1000; + break; + case ICE_AQ_LINK_SPEED_2500MB: + speed = SPEED_2500; + break; + case ICE_AQ_LINK_SPEED_5GB: + speed = SPEED_5000; + break; + case ICE_AQ_LINK_SPEED_10GB: + speed = SPEED_10000; + break; + case ICE_AQ_LINK_SPEED_20GB: + speed = SPEED_20000; + break; + case ICE_AQ_LINK_SPEED_25GB: + speed = SPEED_25000; + break; + case ICE_AQ_LINK_SPEED_40GB: + speed = SPEED_40000; + break; + case ICE_AQ_LINK_SPEED_50GB: + speed = SPEED_50000; + break; + case ICE_AQ_LINK_SPEED_100GB: + speed = SPEED_100000; + break; + default: + speed = 0; + break; + } + else + /* Virtchnl speeds are not defined for every speed supported in + * the hardware. To maintain compatibility with older AVF + * drivers, while reporting the speed the new speed values are + * resolved to the closest known virtchnl speeds + */ + switch (link_speed) { + case ICE_AQ_LINK_SPEED_10MB: + case ICE_AQ_LINK_SPEED_100MB: + speed = (u32)VIRTCHNL_LINK_SPEED_100MB; + break; + case ICE_AQ_LINK_SPEED_1000MB: + case ICE_AQ_LINK_SPEED_2500MB: + case ICE_AQ_LINK_SPEED_5GB: + speed = (u32)VIRTCHNL_LINK_SPEED_1GB; + break; + case ICE_AQ_LINK_SPEED_10GB: + speed = (u32)VIRTCHNL_LINK_SPEED_10GB; + break; + case ICE_AQ_LINK_SPEED_20GB: + speed = (u32)VIRTCHNL_LINK_SPEED_20GB; + break; + case ICE_AQ_LINK_SPEED_25GB: + speed = (u32)VIRTCHNL_LINK_SPEED_25GB; + break; + case ICE_AQ_LINK_SPEED_40GB: + case ICE_AQ_LINK_SPEED_50GB: + case ICE_AQ_LINK_SPEED_100GB: + speed = (u32)VIRTCHNL_LINK_SPEED_40GB; + break; + default: + speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN; + break; + } + + return speed; +} + +/* The mailbox overflow detection algorithm helps to check if there + * is a possibility of a malicious VF transmitting too many MBX messages to the + * PF. + * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during + * driver initialization in ice_init_hw() using ice_mbx_init_snapshot(). + * The struct ice_mbx_snapshot helps to track and traverse a static window of + * messages within the mailbox queue while looking for a malicious VF. + * + * 2. When the caller starts processing its mailbox queue in response to an + * interrupt, the structure ice_mbx_snapshot is expected to be cleared before + * the algorithm can be run for the first time for that interrupt. This can be + * done via ice_mbx_reset_snapshot(). + * + * 3. For every message read by the caller from the MBX Queue, the caller must + * call the detection algorithm's entry function ice_mbx_vf_state_handler(). + * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is + * filled as it is required to be passed to the algorithm. + * + * 4. Every time a message is read from the MBX queue, a VFId is received which + * is passed to the state handler. The boolean output is_malvf of the state + * handler ice_mbx_vf_state_handler() serves as an indicator to the caller + * whether this VF is malicious or not. + * + * 5. 
When a VF is identified to be malicious, the caller can send a message + * to the system administrator. The caller can invoke ice_mbx_report_malvf() + * to help determine if a malicious VF is to be reported or not. This function + * requires the caller to maintain a global bitmap to track all malicious VFs + * and pass that to ice_mbx_report_malvf() along with the VFID which was identified + * to be malicious by ice_mbx_vf_state_handler(). + * + * 6. The global bitmap maintained by PF can be cleared completely if PF is in + * reset or the bit corresponding to a VF can be cleared if that VF is in reset. + * When a VF is shut down and brought back up, we assume that the new VF + * brought up is not malicious and hence report it if found malicious. + * + * 7. The function ice_mbx_reset_snapshot() is called to reset the information + * in ice_mbx_snapshot for every new mailbox interrupt handled. + * + * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated + * when driver is unloaded. + */ +#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M) +/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that + * the max messages check must be ignored in the algorithm + */ +#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF + +/** + * ice_mbx_traverse - Pass through mailbox snapshot + * @hw: pointer to the HW struct + * @new_state: new algorithm state + * + * Traversing the mailbox static snapshot without checking + * for malicious VFs. + */ +static void +ice_mbx_traverse(struct ice_hw *hw, + enum ice_mbx_snapshot_state *new_state) +{ + struct ice_mbx_snap_buffer_data *snap_buf; + u32 num_iterations; + + snap_buf = &hw->mbx_snapshot.mbx_buf; + + /* As mailbox buffer is circular, applying a mask + * on the incremented iteration count. + */ + num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations); + + /* Checking either of the below conditions to exit snapshot traversal: + * Condition-1: If the number of iterations in the mailbox is equal to + * the mailbox head which would indicate that we have reached the end + * of the static snapshot. + * Condition-2: If the maximum messages serviced in the mailbox for a + * given interrupt is the highest possible value then there is no need + * to check if the number of messages processed is equal to it. If not + * check if the number of messages processed is greater than or equal + * to the maximum number of mailbox entries serviced in current work item. + */ + if (num_iterations == snap_buf->head || + (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT && + ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx)) + *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; +} + +/** + * ice_mbx_detect_malvf - Detect malicious VF in snapshot + * @hw: pointer to the HW struct + * @vf_id: relative virtual function ID + * @new_state: new algorithm state + * @is_malvf: boolean output to indicate if VF is malicious + * + * This function tracks the number of asynchronous messages + * sent per VF and marks the VF as malicious if it exceeds + * the permissible number of messages to send. 
+ */ +static int +ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id, + enum ice_mbx_snapshot_state *new_state, + bool *is_malvf) +{ + struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; + + if (vf_id >= snap->mbx_vf.vfcntr_len) + return -EIO; + + /* increment the message count in the VF array */ + snap->mbx_vf.vf_cntr[vf_id]++; + + if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD) + *is_malvf = true; + + /* continue to iterate through the mailbox snapshot */ + ice_mbx_traverse(hw, new_state); + + return 0; +} + +/** + * ice_mbx_reset_snapshot - Reset mailbox snapshot structure + * @snap: pointer to mailbox snapshot structure in the ice_hw struct + * + * Reset the mailbox snapshot structure and clear VF counter array. + */ +static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap) +{ + u32 vfcntr_len; + + if (!snap || !snap->mbx_vf.vf_cntr) + return; + + /* Clear VF counters. */ + vfcntr_len = snap->mbx_vf.vfcntr_len; + if (vfcntr_len) + memset(snap->mbx_vf.vf_cntr, 0, + (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr))); + + /* Reset mailbox snapshot for a new capture. */ + memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf)); + snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; +} + +/** + * ice_mbx_vf_state_handler - Handle states of the overflow algorithm + * @hw: pointer to the HW struct + * @mbx_data: pointer to structure containing mailbox data + * @vf_id: relative virtual function (VF) ID + * @is_malvf: boolean output to indicate if VF is malicious + * + * The function serves as an entry point for the malicious VF + * detection algorithm by handling the different states and state + * transitions of the algorithm: + * New snapshot: This state is entered when creating a new static + * snapshot. The data from any previous mailbox snapshot is + * cleared and a new capture of the mailbox head and tail is + * logged. This will be the new static snapshot to detect + * asynchronous messages sent by VFs. On capturing the snapshot + * and depending on whether the number of pending messages in that + * snapshot exceed the watermark value, the state machine enters + * traverse or detect states. + * Traverse: If pending message count is below watermark then iterate + * through the snapshot without any action on VF. + * Detect: If pending message count exceeds watermark traverse + * the static snapshot and look for a malicious VF. + */ +int +ice_mbx_vf_state_handler(struct ice_hw *hw, + struct ice_mbx_data *mbx_data, u16 vf_id, + bool *is_malvf) +{ + struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; + struct ice_mbx_snap_buffer_data *snap_buf; + struct ice_ctl_q_info *cq = &hw->mailboxq; + enum ice_mbx_snapshot_state new_state; + int status = 0; + + if (!is_malvf || !mbx_data) + return -EINVAL; + + /* When entering the mailbox state machine assume that the VF + * is not malicious until detected. + */ + *is_malvf = false; + + /* Checking if max messages allowed to be processed while servicing current + * interrupt is not less than the defined AVF message threshold. + */ + if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD) + return -EINVAL; + + /* The watermark value should not be lesser than the threshold limit + * set for the number of asynchronous messages a VF can send to mailbox + * nor should it be greater than the maximum number of messages in the + * mailbox serviced in current interrupt. 
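+ * A watermark value outside this range causes the handler to return -EINVAL.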
+ */ + if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD || + mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx) + return -EINVAL; + + new_state = ICE_MAL_VF_DETECT_STATE_INVALID; + snap_buf = &snap->mbx_buf; + + switch (snap_buf->state) { + case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT: + /* Clear any previously held data in mailbox snapshot structure. */ + ice_mbx_reset_snapshot(snap); + + /* Collect the pending ARQ count, number of messages processed and + * the maximum number of messages allowed to be processed from the + * Mailbox for current interrupt. + */ + snap_buf->num_pending_arq = mbx_data->num_pending_arq; + snap_buf->num_msg_proc = mbx_data->num_msg_proc; + snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx; + + /* Capture a new static snapshot of the mailbox by logging the + * head and tail of snapshot and set num_iterations to the tail + * value to mark the start of the iteration through the snapshot. + */ + snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean + + mbx_data->num_pending_arq); + snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1); + snap_buf->num_iterations = snap_buf->tail; + + /* Pending ARQ messages returned by ice_clean_rq_elem + * is the difference between the head and tail of the + * mailbox queue. Comparing this value against the watermark + * helps to check if we potentially have malicious VFs. + */ + if (snap_buf->num_pending_arq >= + mbx_data->async_watermark_val) { + new_state = ICE_MAL_VF_DETECT_STATE_DETECT; + status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf); + } else { + new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE; + ice_mbx_traverse(hw, &new_state); + } + break; + + case ICE_MAL_VF_DETECT_STATE_TRAVERSE: + new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE; + ice_mbx_traverse(hw, &new_state); + break; + + case ICE_MAL_VF_DETECT_STATE_DETECT: + new_state = ICE_MAL_VF_DETECT_STATE_DETECT; + status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf); + break; + + default: + new_state = ICE_MAL_VF_DETECT_STATE_INVALID; + status = -EIO; + } + + snap_buf->state = new_state; + + return status; +} + +/** + * ice_mbx_report_malvf - Track and note malicious VF + * @hw: pointer to the HW struct + * @all_malvfs: all malicious VFs tracked by PF + * @bitmap_len: length of bitmap in bits + * @vf_id: relative virtual function ID of the malicious VF + * @report_malvf: boolean to indicate if malicious VF must be reported + * + * This function will update a bitmap that keeps track of the malicious + * VFs attached to the PF. A malicious VF must be reported only once if + * discovered between VF resets or loading so the function checks + * the input vf_id against the bitmap to verify if the VF has been + * detected in any previous mailbox iterations. 
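+ * + * Only the first detection is reported: if the bit for vf_id is already set + * in all_malvfs, *report_malvf is left as false.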
+ */ +int +ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, + u16 bitmap_len, u16 vf_id, bool *report_malvf) +{ + if (!all_malvfs || !report_malvf) + return -EINVAL; + + *report_malvf = false; + + if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len) + return -EINVAL; + + if (vf_id >= bitmap_len) + return -EIO; + + /* If the vf_id is found in the bitmap set bit and boolean to true */ + if (!test_bit(vf_id, all_malvfs)) { + set_bit(vf_id, all_malvfs); + *report_malvf = true; + } + + return 0; +} + +/** + * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID + * @snap: pointer to the mailbox snapshot structure + * @all_malvfs: all malicious VFs tracked by PF + * @bitmap_len: length of bitmap in bits + * @vf_id: relative virtual function ID of the malicious VF + * + * In case of a VF reset, this function can be called to clear + * the bit corresponding to the VF ID in the bitmap tracking all + * malicious VFs attached to the PF. The function also clears the + * VF counter array at the index of the VF ID. This is to ensure + * that the new VF loaded is not considered malicious before going + * through the overflow detection algorithm. + */ +int +ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, + u16 bitmap_len, u16 vf_id) +{ + if (!snap || !all_malvfs) + return -EINVAL; + + if (bitmap_len < snap->mbx_vf.vfcntr_len) + return -EINVAL; + + /* Ensure VF ID value is not larger than bitmap or VF counter length */ + if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len) + return -EIO; + + /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */ + clear_bit(vf_id, all_malvfs); + + /* Clear the VF counter in the mailbox snapshot structure for that VF ID. + * This is to ensure that if a VF is unloaded and a new one brought back + * up with the same VF ID for a snapshot currently in traversal or detect + * state the counter for that VF ID does not increment on top of existing + * values in the mailbox overflow detection algorithm. + */ + snap->mbx_vf.vf_cntr[vf_id] = 0; + + return 0; +} + +/** + * ice_mbx_init_snapshot - Initialize mailbox snapshot structure + * @hw: pointer to the hardware structure + * @vf_count: number of VFs allocated on a PF + * + * Clear the mailbox snapshot structure and allocate memory + * for the VF counter array based on the number of VFs allocated + * on that PF. + * + * Assumption: This function will assume ice_get_caps() has already been + * called to ensure that the vf_count can be compared against the number + * of VFs supported as defined in the functional capabilities of the device. + */ +int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count) +{ + struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; + + /* Ensure that the number of VFs allocated is non-zero and + * is not greater than the number of supported VFs defined in + * the functional capabilities of the PF. + */ + if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs) + return -EINVAL; + + snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count, + sizeof(*snap->mbx_vf.vf_cntr), + GFP_KERNEL); + if (!snap->mbx_vf.vf_cntr) + return -ENOMEM; + + /* Setting the VF counter length to the number of allocated + * VFs for given PF's functional capabilities. + */ + snap->mbx_vf.vfcntr_len = vf_count; + + /* Clear mbx_buf in the mailbox snapshot structure and set the + * mailbox snapshot state to a new capture.
+ */ + memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf)); + snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + + return 0; +} + +/** + * ice_mbx_deinit_snapshot - Free mailbox snapshot structure + * @hw: pointer to the hardware structure + * + * Clear the mailbox snapshot structure and free the VF counter array. + */ +void ice_mbx_deinit_snapshot(struct ice_hw *hw) +{ + struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; + + /* Free VF counter array and reset vf counter length */ + devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr); + snap->mbx_vf.vfcntr_len = 0; + + /* Clear mbx_buf in the mailbox snapshot structure */ + memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf)); +} diff --git a/drivers/thirdparty/ice/ice_vf_mbx.h b/drivers/thirdparty/ice/ice_vf_mbx.h new file mode 100644 index 000000000000..c3ab1689aaf0 --- /dev/null +++ b/drivers/thirdparty/ice/ice_vf_mbx.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_VF_MBX_H_ +#define _ICE_VF_MBX_H_ + +#include "ice_type.h" +#include "ice_controlq.h" + +/* Defining the mailbox message threshold as 63 asynchronous + * pending messages. Normal VF functionality does not require + * sending more than 63 asynchronous pending messages. + */ +#define ICE_ASYNC_VF_MSG_THRESHOLD 63 + +#ifdef CONFIG_PCI_IOV +int +ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, + u8 *msg, u16 msglen, struct ice_sq_cd *cd); + +u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); +int +ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, + u16 vf_id, bool *is_mal_vf); +int +ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, + u16 bitmap_len, u16 vf_id); +int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count); +void ice_mbx_deinit_snapshot(struct ice_hw *hw); +int +ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, + u16 bitmap_len, u16 vf_id, bool *report_malvf); +#else /* CONFIG_PCI_IOV */ +static inline int +ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw, + u16 __always_unused vfid, u32 __always_unused v_opcode, + u32 __always_unused v_retval, u8 __always_unused *msg, + u16 __always_unused msglen, + struct ice_sq_cd __always_unused *cd) +{ + return 0; +} + +static inline u32 +ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support, + u16 __always_unused link_speed) +{ + return 0; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* _ICE_VF_MBX_H_ */ diff --git a/drivers/thirdparty/ice/ice_vf_vsi_vlan_ops.c b/drivers/thirdparty/ice/ice_vf_vsi_vlan_ops.c index bd1254d01b80..4ac8414ecc38 100644 --- a/drivers/thirdparty/ice/ice_vf_vsi_vlan_ops.c +++ b/drivers/thirdparty/ice/ice_vf_vsi_vlan_ops.c @@ -6,12 +6,12 @@ #include "ice_vlan_mode.h" #include "ice.h" #include "ice_vf_vsi_vlan_ops.h" -#include "ice_virtchnl_pf.h" +#include "ice_sriov.h" #include "ice_lib.h" static int noop_vlan_arg(struct ice_vsi __always_unused *vsi, - struct ice_vlan * __always_unused vlan) + struct ice_vlan __always_unused *vlan) { return 0; } @@ -35,22 +35,27 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) { struct ice_vsi_vlan_ops *vlan_ops; struct ice_pf *pf = vsi->back; - struct ice_vf *vf; + struct ice_vf *vf = vsi->vf; - vf = &pf->vf[vsi->vf_id]; + if (WARN_ON(!vf)) + return; if (ice_is_dvm_ena(&pf->hw)) { vlan_ops = &vsi->outer_vlan_ops; /* outer VLAN ops regardless of port VLAN config */ vlan_ops->add_vlan = ice_vsi_add_vlan; - vlan_ops->dis_rx_filtering =
ice_vsi_dis_rx_vlan_filtering; vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering; vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering; if (ice_vf_is_port_vlan_ena(vf)) { /* setup outer VLAN ops */ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; + /* all Rx traffic should be in the domain of the + * assigned port VLAN, so prevent disabling Rx VLAN + * filtering + */ + vlan_ops->dis_rx_filtering = noop_vlan; vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering; @@ -63,7 +68,10 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; } else { - if (test_bit(ICE_FLAG_VF_VLAN_PRUNE_DIS, pf->flags)) + vlan_ops->dis_rx_filtering = + ice_vsi_dis_rx_vlan_filtering; + + if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) vlan_ops->ena_rx_filtering = noop_vlan; else vlan_ops->ena_rx_filtering = @@ -88,7 +96,6 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) /* inner VLAN ops regardless of port VLAN config */ vlan_ops->add_vlan = ice_vsi_add_vlan; - vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering; vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering; vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering; @@ -96,8 +103,15 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan; vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering; + /* all Rx traffic should be in the domain of the + * assigned port VLAN, so prevent disabling Rx VLAN + * filtering + */ + vlan_ops->dis_rx_filtering = noop_vlan; } else { - if (test_bit(ICE_FLAG_VF_VLAN_PRUNE_DIS, pf->flags)) + vlan_ops->dis_rx_filtering = + ice_vsi_dis_rx_vlan_filtering; + if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) vlan_ops->ena_rx_filtering = noop_vlan; else vlan_ops->ena_rx_filtering = @@ -127,9 +141,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) */ void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi) { - struct ice_vf *vf = &vsi->back->vf[vsi->vf_id]; struct ice_vsi_vlan_ops *vlan_ops; - struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vf *vf = vsi->vf; + struct device *dev; + + if (WARN_ON(!vf)) + return; + + dev = ice_pf_to_dev(vf->pf); if (!ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf)) return; @@ -193,7 +212,10 @@ void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi) */ void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi) { - struct ice_vf *vf = &vsi->back->vf[vsi->vf_id]; + struct ice_vf *vf = vsi->vf; + + if (WARN_ON(!vf)) + return; if (ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf)) return; @@ -209,9 +231,12 @@ void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi) */ int ice_vf_vsi_dcf_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) { - struct ice_vf *vf = &vsi->back->vf[vsi->vf_id]; + struct ice_vf *vf = vsi->vf; int err; + if (WARN_ON(!vf)) + return -EINVAL; + if (!ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf)) return -EOPNOTSUPP; @@ -235,9 +260,12 @@ int ice_vf_vsi_dcf_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vla */ int ice_vf_vsi_dcf_ena_outer_vlan_stripping(struct ice_vsi *vsi, u16 tpid) { - struct ice_vf *vf = &vsi->back->vf[vsi->vf_id]; + struct ice_vf *vf = vsi->vf; int err; + if (WARN_ON(!vf)) + return -EINVAL; + if (!ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf)) return -EOPNOTSUPP; diff --git a/drivers/thirdparty/ice/ice_vfio_pci.c 
b/drivers/thirdparty/ice/ice_vfio_pci.c new file mode 100644 index 000000000000..72a2c4343057 --- /dev/null +++ b/drivers/thirdparty/ice/ice_vfio_pci.c @@ -0,0 +1,731 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#include +#include +#include + +#include +#include "ice_migration.h" + +#define DRIVER_DESC "ICE VFIO PCI - User Level meta-driver for Intel E800 device family" + +#define VFIO_DEVICE_MIGRATION_OFFSET(x) \ + (offsetof(struct vfio_device_migration_info, x)) +#define ICE_VFIO_MIG_REGION_INFO_SZ (sizeof(struct vfio_device_migration_info)) +#define ICE_VFIO_MIG_REGION_DATA_SZ (sizeof(struct ice_vfio_pci_regs)) + +/* IAVF registers description */ +#define IAVF_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ +#define IAVF_VF_ATQH1 0x00006400 /* Reset: EMPR */ +#define IAVF_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ +#define IAVF_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ +#define IAVF_VF_ARQT1 0x00007000 /* Reset: EMPR */ +#define IAVF_VF_ARQH1 0x00007400 /* Reset: EMPR */ +#define IAVF_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ +#define IAVF_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ +#define IAVF_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ +#define IAVF_VF_ATQT1 0x00008400 /* Reset: EMPR */ +#define IAVF_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ +#define IAVF_VFINT_DYN_CTLN1(_INTVF) \ + (0x00003800 + ((_INTVF) * 4)) /* _INTVF=0...16 */ /* Reset: VFR */ +#define IAVF_VFINT_DYN_CTLN_NUM 16 +#define IAVF_VFINT_ITRN0(_i) \ + (0x00004C00 + (_i) * 4) /* _i=0...2 */ /* Reset: VFR */ +#define IAVF_VFINT_ITRN0_NUM 3 +#define IAVF_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) + /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ +#define IAVF_VFINT_ITRN_NUM 3 +#define IAVF_QRX_TAIL1(_Q) \ + (0x00002000 + ((_Q) * 4)) /* _Q=0...256 */ /* Reset: CORER */ +#define IAVF_QRX_TAIL_MAX 256 + +/* Registers for saving and loading during live Migration */ +struct ice_vfio_pci_regs { + /* VF interrupts */ + u32 int_dyn_ctl0; + u32 int_dyn_ctln[IAVF_VFINT_DYN_CTLN_NUM]; + u32 int_intr0[IAVF_VFINT_ITRN0_NUM]; + u32 int_intrn[IAVF_VFINT_ITRN_NUM][IAVF_VFINT_DYN_CTLN_NUM]; + + /* VF Control Queues */ + u32 asq_bal; + u32 asq_bah; + u32 asq_len; + u32 asq_head; + u32 asq_tail; + u32 arq_bal; + u32 arq_bah; + u32 arq_len; + u32 arq_head; + u32 arq_tail; + + /* VF LAN RX */ + u32 rx_tail[IAVF_QRX_TAIL_MAX]; +}; + +struct ice_vfio_pci_core_device { + struct vfio_pci_core_device core_device; + struct vfio_device_migration_info mig_info; + struct ice_vfio_pci_regs *regs; + u8 __iomem *io_base; + void *vf_handle; +}; + +/** + * ice_vfio_pci_save_regs - Save migration register data + * @ice_vdev: pointer to ice vfio pci core device structure + * @regs: pointer to ice_vfio_pci_regs structure + * + */ +static void +ice_vfio_pci_save_regs(struct ice_vfio_pci_core_device *ice_vdev, + struct ice_vfio_pci_regs *regs) +{ + u8 __iomem *io_base = ice_vdev->io_base; + int i, j; + + regs->int_dyn_ctl0 = readl(io_base + IAVF_VFINT_DYN_CTL01); + + for (i = 0; i < IAVF_VFINT_DYN_CTLN_NUM; i++) + regs->int_dyn_ctln[i] = + readl(io_base + IAVF_VFINT_DYN_CTLN1(i)); + + for (i = 0; i < IAVF_VFINT_ITRN0_NUM; i++) + regs->int_intr0[i] = readl(io_base + IAVF_VFINT_ITRN0(i)); + + for (i = 0; i < IAVF_VFINT_ITRN_NUM; i++) + for (j = 0; j < IAVF_VFINT_DYN_CTLN_NUM; j++) + regs->int_intrn[i][j] = + readl(io_base + IAVF_VFINT_ITRN1(i, j)); + + regs->asq_bal = readl(io_base + IAVF_VF_ATQBAL1); + regs->asq_bah = readl(io_base + IAVF_VF_ATQBAH1); + regs->asq_len = readl(io_base + IAVF_VF_ATQLEN1); + 
regs->asq_head = readl(io_base + IAVF_VF_ATQH1); + regs->asq_tail = readl(io_base + IAVF_VF_ATQT1); + regs->arq_bal = readl(io_base + IAVF_VF_ARQBAL1); + regs->arq_bah = readl(io_base + IAVF_VF_ARQBAH1); + regs->arq_len = readl(io_base + IAVF_VF_ARQLEN1); + regs->arq_head = readl(io_base + IAVF_VF_ARQH1); + regs->arq_tail = readl(io_base + IAVF_VF_ARQT1); + + for (i = 0; i < IAVF_QRX_TAIL_MAX; i++) + regs->rx_tail[i] = readl(io_base + IAVF_QRX_TAIL1(i)); +} + +/** + * ice_vfio_pci_load_regs - Load migration register data + * @ice_vdev: pointer to ice vfio pci core device structure + * @regs: pointer to ice_vfio_pci_regs structure + * + */ +static void +ice_vfio_pci_load_regs(struct ice_vfio_pci_core_device *ice_vdev, + struct ice_vfio_pci_regs *regs) +{ + u8 __iomem *io_base = ice_vdev->io_base; + int i, j; + + writel(regs->int_dyn_ctl0, io_base + IAVF_VFINT_DYN_CTL01); + + for (i = 0; i < IAVF_VFINT_DYN_CTLN_NUM; i++) + writel(regs->int_dyn_ctln[i], + io_base + IAVF_VFINT_DYN_CTLN1(i)); + + for (i = 0; i < IAVF_VFINT_ITRN0_NUM; i++) + writel(regs->int_intr0[i], io_base + IAVF_VFINT_ITRN0(i)); + + for (i = 0; i < IAVF_VFINT_ITRN_NUM; i++) + for (j = 0; j < IAVF_VFINT_DYN_CTLN_NUM; j++) + writel(regs->int_intrn[i][j], + io_base + IAVF_VFINT_ITRN1(i, j)); + + writel(regs->asq_bal, io_base + IAVF_VF_ATQBAL1); + writel(regs->asq_bah, io_base + IAVF_VF_ATQBAH1); + writel(regs->asq_len, io_base + IAVF_VF_ATQLEN1); + writel(regs->asq_head, io_base + IAVF_VF_ATQH1); + writel(regs->asq_tail, io_base + IAVF_VF_ATQT1); + writel(regs->arq_bal, io_base + IAVF_VF_ARQBAL1); + writel(regs->arq_bah, io_base + IAVF_VF_ARQBAH1); + writel(regs->arq_len, io_base + IAVF_VF_ARQLEN1); + writel(regs->arq_head, io_base + IAVF_VF_ARQH1); + writel(regs->arq_tail, io_base + IAVF_VF_ARQT1); + + for (i = 0; i < IAVF_QRX_TAIL_MAX; i++) + writel(regs->rx_tail[i], io_base + IAVF_QRX_TAIL1(i)); +} + +/** + * ice_vfio_pci_load_state - VFIO device state reloading + * @ice_vdev: pointer to ice vfio pci core device structure + * + * Load device state and restore it. This function is called when the VFIO uAPI + * consumer wants to load the device state info from VFIO migration region and + * restore it into the device. This function should make sure all the device + * state info is loaded and restored successfully. As a result, return value is + * mandatory to be checked. + * + * Return 0 for success, negative value for failure. + */ +static int __must_check +ice_vfio_pci_load_state(struct ice_vfio_pci_core_device *ice_vdev) +{ + ice_vfio_pci_load_regs(ice_vdev, ice_vdev->regs); + return 0; +} + +/** + * ice_vfio_pci_save_state - VFIO device state saving + * @ice_vdev: pointer to ice vfio pci core device structure + * + * Snapshot the device state and save it. This function is called when the + * VFIO uAPI consumer wants to snapshot the current device state and save + * it into the VFIO migration region. This function should make sure all + * of the device state info is collected and saved successfully. As a + * result, return value is mandatory to be checked. + * + * Return 0 for success, negative value for failure.
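+ * + * Note: only the register set described by struct ice_vfio_pci_regs is + * captured; pending_bytes is then set to the size of that data.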
+ */ +static int __must_check +ice_vfio_pci_save_state(struct ice_vfio_pci_core_device *ice_vdev) +{ + ice_vfio_pci_save_regs(ice_vdev, ice_vdev->regs); + ice_vdev->mig_info.pending_bytes = ICE_VFIO_MIG_REGION_DATA_SZ; + return 0; +} + +/** + * ice_vfio_pci_reset_mig - Reset migration status + * @ice_vdev: pointer to ice vfio pci core device structure + * + */ +static void ice_vfio_pci_reset_mig(struct ice_vfio_pci_core_device *ice_vdev) +{ + ice_vdev->mig_info.pending_bytes = 0; +} + +/** + * ice_vfio_pci_set_device_state - Config device state + * @ice_vdev: pointer to ice vfio pci core device structure + * @state: device state + * + * Return 0 for success, negative value for failure. + */ +static int +ice_vfio_pci_set_device_state(struct ice_vfio_pci_core_device *ice_vdev, + u32 state) +{ + struct vfio_device_migration_info *mig_info = &ice_vdev->mig_info; + int ret = 0; + + if (state == mig_info->device_state) + return 0; + + switch (state) { + case VFIO_DEVICE_STATE_RUNNING: + if (mig_info->device_state == VFIO_DEVICE_STATE_RESUMING) + ret = ice_vfio_pci_load_state(ice_vdev); + break; + case VFIO_DEVICE_STATE_RUNNING | VFIO_DEVICE_STATE_SAVING: + break; + case VFIO_DEVICE_STATE_SAVING: + ret = ice_vfio_pci_save_state(ice_vdev); + break; + case VFIO_DEVICE_STATE_STOP: + ice_vfio_pci_reset_mig(ice_vdev); + break; + case VFIO_DEVICE_STATE_RESUMING: + break; + default: + return -EFAULT; + } + + if (!ret) + mig_info->device_state = state; + + return ret; +} + +/** + * ice_vfio_pci_mig_rw_data - Read/write vfio migration data section + * @ice_vdev: pointer to ice vfio pci core device structure + * @buf: buffer for data + * @count: size of buffer + * @offset: read/write offset + * @iswrite: write or not + * + * Return the number of read/write bytes for success, negative value for failure + */ +static ssize_t +ice_vfio_pci_mig_rw_data(struct ice_vfio_pci_core_device *ice_vdev, + char __user *buf, size_t count, u64 offset, + bool iswrite) +{ + struct vfio_device_migration_info *mig_info = &ice_vdev->mig_info; + int ret; + + if (offset + count > ICE_VFIO_MIG_REGION_DATA_SZ) + return -EINVAL; + + if (iswrite) { + ret = copy_from_user((u8 *)ice_vdev->regs + offset, buf, count); + if (ret) + return -EFAULT; + } else { + ret = copy_to_user(buf, (u8 *)ice_vdev->regs + offset, count); + if (ret) + return -EFAULT; + + mig_info->pending_bytes -= count; + } + + return count; +} + +/** + * ice_vfio_pci_mig_rw_device_state - Read/write vfio migration device_state + * @ice_vdev: pointer to ice vfio pci core device structure + * @buf: buffer for data + * @count: size of buffer + * @iswrite: write or not + * + * Return the number of read/write bytes for success, negative value for failure + */ +static ssize_t +ice_vfio_pci_mig_rw_device_state(struct ice_vfio_pci_core_device *ice_vdev, + char __user *buf, size_t count, bool iswrite) +{ + int ret; + + if (count != sizeof(ice_vdev->mig_info.device_state)) + return -EINVAL; + + if (iswrite) { + u32 device_state; + + ret = copy_from_user(&device_state, buf, count); + if (ret) + return -EFAULT; + + ret = ice_vfio_pci_set_device_state(ice_vdev, device_state); + if (ret) + return ret; + } else { + ret = copy_to_user(buf, &ice_vdev->mig_info.device_state, + count); + if (ret) + return -EFAULT; + } + + return count; +} + +/** + * ice_vfio_pci_mig_rw_pending_bytes - read/write vfio migration pending_bytes + * @ice_vdev: pointer to ice vfio pci core device structure + * @buf: buffer for data + * @count: size of buffer + * @iswrite: write or not + * + * Return the number 
of read/write bytes for success, negative value for failure + */ +static ssize_t +ice_vfio_pci_mig_rw_pending_bytes(struct ice_vfio_pci_core_device *ice_vdev, + char __user *buf, size_t count, bool iswrite) +{ + int ret; + + if (count != sizeof(ice_vdev->mig_info.pending_bytes)) + return -EINVAL; + + if (iswrite) + return -EFAULT; + + ret = copy_to_user(buf, &ice_vdev->mig_info.pending_bytes, count); + if (ret) + return -EFAULT; + + return count; +} + +/** + * ice_vfio_pci_mig_rw_data_offset - Read/write vfio migration data_offset + * @ice_vdev: pointer to ice vfio pci core device structure + * @buf: buffer for data + * @count: size of buffer + * @iswrite: write or not + * + * Return the number of read/write bytes for success, negative value for failure + */ +static ssize_t +ice_vfio_pci_mig_rw_data_offset(struct ice_vfio_pci_core_device *ice_vdev, + char __user *buf, size_t count, bool iswrite) +{ + int ret; + + if (count != sizeof(ice_vdev->mig_info.data_offset)) + return -EINVAL; + + if (iswrite) + return -EFAULT; + + ret = copy_to_user(buf, &ice_vdev->mig_info.data_offset, count); + if (ret) + return -EFAULT; + + return count; +} + +/** + * ice_vfio_pci_mig_rw_data_size - Read/write vfio migration data_size + * @ice_vdev: pointer to ice vfio pci core device structure + * @buf: buffer for data + * @count: size of buffer + * @iswrite: write or not + * + * Return the number of read/write bytes for success, negative value for failure + */ +static ssize_t +ice_vfio_pci_mig_rw_data_size(struct ice_vfio_pci_core_device *ice_vdev, + char __user *buf, size_t count, bool iswrite) +{ + struct vfio_device_migration_info *mig_info = &ice_vdev->mig_info; + u64 data_size; + int ret; + + if (count != sizeof(ice_vdev->mig_info.data_size)) + return -EINVAL; + + if (iswrite) { + ret = copy_from_user(&data_size, buf, count); + if (ret) + return -EFAULT; + + /* The user application should write the size in bytes of the + * data copied in the migration region during the _RESUMING + * state + */ + if (mig_info->device_state != VFIO_DEVICE_STATE_RESUMING) + return -EINVAL; + + if (data_size != ICE_VFIO_MIG_REGION_DATA_SZ) + return -EINVAL; + } else { + /* The user application should read data_size to get the size + * in bytes of the data copied in the migration region during + * the _SAVING state + */ + if (mig_info->device_state != VFIO_DEVICE_STATE_SAVING) + return -EINVAL; + + ret = copy_to_user(buf, &mig_info->data_size, count); + if (ret) + return -EFAULT; + } + + return count; +} + +/** + * ice_vfio_pci_mig_rw - Callback for vfio pci region read or write + * @vdev: pointer to vfio pci core device structure + * @buf: buffer for data + * @count: size of buffer + * @ppos: file position offset + * @iswrite: write or not + * + * This is a callback function used by vfio framework to read or write the + * vfio region for the live migration. 
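+ * + * Offsets below mig_info.data_offset address the fields of struct + * vfio_device_migration_info, while offsets at or above data_offset are + * routed to the saved device state in struct ice_vfio_pci_regs.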
+ * + * Return the number of read/write bytes for success, negative value for failure + */ +static ssize_t +ice_vfio_pci_mig_rw(struct vfio_pci_core_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + struct ice_vfio_pci_core_device *ice_vdev = container_of(vdev, + struct ice_vfio_pci_core_device, core_device); + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - + VFIO_PCI_NUM_REGIONS; + struct vfio_pci_region *region = &vdev->region[index]; + u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; + int ret; + + if (region->type != VFIO_REGION_TYPE_MIGRATION || + region->subtype != VFIO_REGION_SUBTYPE_MIGRATION) + return -EINVAL; + + if (pos >= ice_vdev->mig_info.data_offset) + return ice_vfio_pci_mig_rw_data(ice_vdev, buf, count, + pos - ice_vdev->mig_info.data_offset, iswrite); + + switch (pos) { + case VFIO_DEVICE_MIGRATION_OFFSET(device_state): + ret = ice_vfio_pci_mig_rw_device_state(ice_vdev, buf, + count, iswrite); + break; + case VFIO_DEVICE_MIGRATION_OFFSET(pending_bytes): + ret = ice_vfio_pci_mig_rw_pending_bytes(ice_vdev, buf, + count, iswrite); + break; + case VFIO_DEVICE_MIGRATION_OFFSET(data_offset): + ret = ice_vfio_pci_mig_rw_data_offset(ice_vdev, buf, + count, iswrite); + break; + case VFIO_DEVICE_MIGRATION_OFFSET(data_size): + ret = ice_vfio_pci_mig_rw_data_size(ice_vdev, buf, + count, iswrite); + break; + default: + ret = -EFAULT; + break; + } + + return ret; +} + +/** + * ice_vfio_pci_mig_release - Callback for vfio pci region release + * @vdev: pointer to vfio pci core device structure + * @region: pointer to vfio pci region + * + * This is a callback function used by the vfio framework to inform the driver + * that the vfio region is about to be released. + */ +static void +ice_vfio_pci_mig_release(struct vfio_pci_core_device *vdev, + struct vfio_pci_region *region) +{ +} + +static struct vfio_pci_regops ice_vfio_pci_regops = { + .rw = ice_vfio_pci_mig_rw, + .release = ice_vfio_pci_mig_release, +}; + +/** + * ice_vfio_migration_init - Initialization for live migration function + * @ice_vdev: pointer to ice vfio pci core device structure + * + * Returns 0 on success, negative value on error + */ +static int ice_vfio_migration_init(struct ice_vfio_pci_core_device *ice_vdev) +{ + struct vfio_device_migration_info *mig_info = &ice_vdev->mig_info; + struct pci_dev *pdev = ice_vdev->core_device.pdev; + int ret = 0; + + ice_vdev->regs = kzalloc(ICE_VFIO_MIG_REGION_DATA_SZ, GFP_KERNEL); + if (!ice_vdev->regs) + return -ENOMEM; + + mig_info->data_size = ICE_VFIO_MIG_REGION_DATA_SZ; + mig_info->data_offset = ICE_VFIO_MIG_REGION_INFO_SZ; + + ice_vdev->vf_handle = ice_migration_get_vf(pdev); + if (!ice_vdev->vf_handle) { + ret = -EFAULT; + goto err_get_vf_handle; + } + + ice_vdev->io_base = (u8 __iomem *)pci_iomap(pdev, 0, 0); + if (!ice_vdev->io_base) { + ret = -EFAULT; + goto err_pci_iomap; + } + + ret = vfio_pci_register_dev_region(&ice_vdev->core_device, + VFIO_REGION_TYPE_MIGRATION, + VFIO_REGION_SUBTYPE_MIGRATION, + &ice_vfio_pci_regops, + ICE_VFIO_MIG_REGION_INFO_SZ + + ICE_VFIO_MIG_REGION_DATA_SZ, + VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE, + NULL); + if (ret) + goto err_dev_region_register; + + return ret; + +err_dev_region_register: + pci_iounmap(pdev, ice_vdev->io_base); +err_get_vf_handle: +err_pci_iomap: + kfree(ice_vdev->regs); + + return ret; +} + +/** + * ice_vfio_migration_uninit - Cleanup for live migration function + * @ice_vdev: pointer to ice vfio pci core device structure + */
+static void ice_vfio_migration_uninit(struct ice_vfio_pci_core_device *ice_vdev) +{ + pci_iounmap(ice_vdev->core_device.pdev, ice_vdev->io_base); + kfree(ice_vdev->regs); +} + +/** + * ice_vfio_pci_open_device - Called when a vfio device is probed by VFIO UAPI + * @core_vdev: the vfio device to open + * + * Initialization of the vfio device + * + * Returns 0 on success, negative value on error + */ +static int ice_vfio_pci_open_device(struct vfio_device *core_vdev) +{ + struct ice_vfio_pci_core_device *ice_vdev = container_of(core_vdev, + struct ice_vfio_pci_core_device, core_device.vdev); + struct vfio_pci_core_device *vdev = &ice_vdev->core_device; + int ret; + + ret = vfio_pci_core_enable(vdev); + if (ret) + return ret; + + ret = ice_vfio_migration_init(ice_vdev); + if (ret) { + vfio_pci_core_disable(vdev); + return ret; + } + + vfio_pci_core_finish_enable(vdev); + + return 0; +} + +/** + * ice_vfio_pci_close_device - Called when a vfio device fd is closed + * @core_vdev: the vfio device to close + */ +static void ice_vfio_pci_close_device(struct vfio_device *core_vdev) +{ + struct ice_vfio_pci_core_device *ice_vdev = container_of(core_vdev, + struct ice_vfio_pci_core_device, core_device.vdev); + + vfio_pci_core_close_device(core_vdev); + ice_vfio_migration_uninit(ice_vdev); +} + +static const struct vfio_device_ops ice_vfio_pci_ops = { + .name = "ice-vfio-pci", + .open_device = ice_vfio_pci_open_device, + .close_device = ice_vfio_pci_close_device, + .read = vfio_pci_core_read, + .write = vfio_pci_core_write, + .ioctl = vfio_pci_core_ioctl, + .mmap = vfio_pci_core_mmap, + .request = vfio_pci_core_request, + .match = vfio_pci_core_match, +}; + +/** + * ice_vfio_pci_probe - Device initialization routine + * @pdev: PCI device information struct + * @id: entry in ice_vfio_pci_table + * + * Returns 0 on success, negative on failure + */ +static int +ice_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ice_vfio_pci_core_device *ice_vdev; + int ret; + + ice_vdev = kzalloc(sizeof(*ice_vdev), GFP_KERNEL); + if (!ice_vdev) + return -ENOMEM; + + vfio_pci_core_init_device(&ice_vdev->core_device, pdev, + &ice_vfio_pci_ops); + + ret = vfio_pci_core_register_device(&ice_vdev->core_device); + if (ret) + goto out_free; + + dev_set_drvdata(&pdev->dev, ice_vdev); + + return 0; + +out_free: + vfio_pci_core_uninit_device(&ice_vdev->core_device); + kfree(ice_vdev); + return ret; +} + +/** + * ice_vfio_pci_remove - Device removal routine + * @pdev: PCI device information struct + */ +static void ice_vfio_pci_remove(struct pci_dev *pdev) +{ + struct ice_vfio_pci_core_device *ice_vdev = + (struct ice_vfio_pci_core_device *)dev_get_drvdata(&pdev->dev); + + vfio_pci_core_unregister_device(&ice_vdev->core_device); + vfio_pci_core_uninit_device(&ice_vdev->core_device); + kfree(ice_vdev); +} + +/* ice_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ice_vfio_pci_table[] = { + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x1889) }, + {} +}; +MODULE_DEVICE_TABLE(pci, ice_vfio_pci_table); + +static struct pci_driver ice_vfio_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = ice_vfio_pci_table, + .probe = ice_vfio_pci_probe, + .remove = ice_vfio_pci_remove, + .err_handler = &vfio_pci_core_err_handlers, +}; + +/** + * ice_vfio_pci_init - Driver registration 
routine + * + * ice_vfio_pci_init is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init ice_vfio_pci_init(void) +{ + int ret; + + /* Register and scan for devices */ + ret = pci_register_driver(&ice_vfio_pci_driver); + if (ret) + return ret; + + return 0; +} +module_init(ice_vfio_pci_init); + +/** + * ice_vfio_pci_exit - Driver exit cleanup routine + * + * ice_vfio_pci_exit is called just before the driver is removed + * from memory. + */ +static void __exit ice_vfio_pci_exit(void) +{ + pci_unregister_driver(&ice_vfio_pci_driver); +} +module_exit(ice_vfio_pci_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/thirdparty/ice/ice_virtchnl_pf.c b/drivers/thirdparty/ice/ice_virtchnl.c similarity index 53% rename from drivers/thirdparty/ice/ice_virtchnl_pf.c rename to drivers/thirdparty/ice/ice_virtchnl.c index 52fd9c37fa38..c1136ea5a7aa 100644 --- a/drivers/thirdparty/ice/ice_virtchnl_pf.c +++ b/drivers/thirdparty/ice/ice_virtchnl.c @@ -1,17 +1,18 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2018-2021, Intel Corporation. */ +#include "ice_virtchnl.h" +#include "ice_vf_lib_private.h" #include "ice.h" #include "ice_base.h" #include "ice_lib.h" #include "ice_fltr.h" -#include "ice_dcb_lib.h" -#include "ice_eswitch.h" #include "ice_virtchnl_allowlist.h" #include "ice_vf_vsi_vlan_ops.h" #include "ice_vlan.h" +#include "ice_vf_adq.h" #include "ice_flex_pipe.h" -#include "ice_tc_lib.h" +#include "ice_dcb_lib.h" #define FIELD_SELECTOR(proto_hdr_field) \ BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK) @@ -49,6 +50,9 @@ static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = { ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0}, {VIRTCHNL_PROTO_HDR_L2TPV2, ICE_FLOW_SEG_HDR_L2TPV2}, {VIRTCHNL_PROTO_HDR_PPP, ICE_FLOW_SEG_HDR_PPP}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG}, + {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG}, + {VIRTCHNL_PROTO_HDR_GRE, ICE_FLOW_SEG_HDR_GRE}, }; struct ice_vc_hash_field_match_type { @@ -100,8 +104,125 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + {VIRTCHNL_PROTO_HDR_IPV4, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)}, + {VIRTCHNL_PROTO_HDR_IPV4, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + 
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), + ICE_FLOW_HASH_IPV4}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + 
FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC), BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), @@ -123,6 +244,9 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID)}, {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST), @@ -159,6 +283,25 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), ICE_FLOW_HASH_TCP_PORT}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), + ICE_FLOW_HASH_TCP_PORT | + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, {VIRTCHNL_PROTO_HDR_UDP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT), BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, @@ -169,6 +312,25 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), ICE_FLOW_HASH_UDP_PORT}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), + ICE_FLOW_HASH_UDP_PORT 
| + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, {VIRTCHNL_PROTO_HDR_SCTP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT), BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, @@ -179,6 +341,25 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), ICE_FLOW_HASH_SCTP_PORT}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), + ICE_FLOW_HASH_SCTP_PORT | + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, {VIRTCHNL_PROTO_HDR_PPPOE, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID), BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)}, @@ -201,130 +382,14 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID), BIT_ULL(ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID) | BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID)}, + {VIRTCHNL_PROTO_HDR_L2TPV2, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID), + BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID)}, + {VIRTCHNL_PROTO_HDR_L2TPV2, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID), + BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID)}, }; -/** - * ice_get_vf_vsi - get VF's VSI based on the stored index - * @vf: VF used to get VSI - */ -struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) -{ - return vf->pf->vsi[vf->lan_vsi_idx]; -} - -static struct ice_vsi *ice_get_vf_adq_vsi(struct ice_vf *vf, u8 tc) -{ - return vf->pf->vsi[vf->ch[tc].vsi_idx]; -} - -/** - * ice_is_vf_adq_ena - is VF ADQ enabled - * @vf: pointer to the VF info - * - * This function returns true if VF ADQ is enabled. 
It is must to check - * VF's num_tc as well, it must be more than ICE_VF_CHNL_START_TC for - * valid ADQ configuration - */ -static bool ice_is_vf_adq_ena(struct ice_vf *vf) -{ - return vf->adq_enabled && (vf->num_tc > ICE_VF_CHNL_START_TC); -} - -/** - * ice_vf_adq_vsi_stop_rings - stops the VF ADQ VSI rings - * @vf: pointer to the VF info - * @tc: VF ADQ TC number - * - * This function stops Tx and Rx ring specific to VF ADQ VSI - */ -static void ice_vf_adq_vsi_stop_rings(struct ice_vf *vf, int tc) -{ - struct ice_vsi *vsi = ice_get_vf_adq_vsi(vf, tc); - - if (!vsi) - return; - ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); - ice_vsi_stop_all_rx_rings(vsi); -} - -/** - * ice_vf_adq_vsi_disable_txqs - disable Tx queues for VF ADQ - * @vf: pointer to the VF info - * @tc: VF ADQ TC number - * - * This function disabled Tx queues specific to VF ADQ VSI - */ -static void ice_vf_adq_vsi_disable_txqs(struct ice_vf *vf, int tc) -{ - struct ice_vsi *vsi = ice_get_vf_adq_vsi(vf, tc); - - if (!vsi) - return; - ice_dis_vsi_txq(vsi->port_info, vf->ch[tc].vsi_idx, 0, 0, NULL, NULL, - NULL, ICE_VF_RESET, vf->vf_id, NULL); -} - -/** - * ice_validate_vf_id - helper to check if VF ID is valid - * @pf: pointer to the PF structure - * @vf_id: the ID of the VF to check - */ -static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id) -{ - /* vf_id range is only valid for 0-255, and should always be unsigned */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id); - return -EINVAL; - } - return 0; -} - -/** - * ice_check_vf_init - helper to check if VF init complete - * @pf: pointer to the PF structure - * @vf: the pointer to the VF to check - */ -static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf) -{ - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { - dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. 
Try again.\n", - vf->vf_id); - return -EBUSY; - } - return 0; -} - -/** - * ice_err_to_virt_err - translate errors for VF return code - * @ice_err: error return code - */ -static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err) -{ - switch (ice_err) { - case ICE_SUCCESS: - return VIRTCHNL_STATUS_SUCCESS; - case ICE_ERR_BAD_PTR: - case ICE_ERR_INVAL_SIZE: - case ICE_ERR_DEVICE_NOT_SUPPORTED: - case ICE_ERR_PARAM: - case ICE_ERR_CFG: - return VIRTCHNL_STATUS_ERR_PARAM; - case ICE_ERR_NO_MEMORY: - return VIRTCHNL_STATUS_ERR_NO_MEMORY; - case ICE_ERR_NOT_READY: - case ICE_ERR_RESET_FAILED: - case ICE_ERR_FW_API_VER: - case ICE_ERR_AQ_ERROR: - case ICE_ERR_AQ_TIMEOUT: - case ICE_ERR_AQ_FULL: - case ICE_ERR_AQ_NO_WORK: - case ICE_ERR_AQ_EMPTY: - return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; - default: - return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; - } -} - /** * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF * @pf: pointer to the PF structure @@ -333,16 +398,16 @@ static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err) * @msg: pointer to the msg buffer * @msglen: msg length */ -static void +void ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { struct ice_hw *hw = &pf->hw; - unsigned int i; - - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; + struct ice_vf *vf; + unsigned int bkt; + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) { /* Not all vfs are enabled so skip the ones that are not */ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) @@ -354,6 +419,7 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); } + mutex_unlock(&pf->vfs.table_lock); } /** @@ -381,52 +447,6 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe, } } -/** - * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled - * @vf: the VF to check - * - * Returns true if the VF has no Rx and no Tx queues enabled and returns false - * otherwise - */ -static bool ice_vf_has_no_qs_ena(struct ice_vf *vf) -{ - return (!bitmap_weight(vf->rxq_ena, ICE_MAX_QS_PER_VF) && - !bitmap_weight(vf->txq_ena, ICE_MAX_QS_PER_VF)); -} - -/** - * ice_vf_get_port_info - Get the VF's port info structure - * @vf: VF used to get the port info structure for - */ -static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf) -{ - return vf->pf->hw.port_info; -} - -/** - * ice_is_vf_link_up - check if the VF's link is up - * @vf: VF to check if link is up - */ -static bool ice_is_vf_link_up(struct ice_vf *vf) -{ - struct ice_port_info *pi = ice_vf_get_port_info(vf); - struct ice_pf *pf = vf->pf; - - if (ice_check_vf_init(pf, vf)) - return false; - - if (test_bit(ICE_BAD_EEPROM, pf->state)) - return false; - - if (ice_vf_has_no_qs_ena(vf)) - return false; - else if (vf->link_forced) - return vf->link_up; - else - return pi->phy.link_info.link_info & - ICE_AQ_LINK_UP; -} - /** * ice_vc_notify_vf_link_state - Inform a VF of link status * @vf: pointer to the VF structure @@ -455,2069 +475,19 @@ void ice_vc_notify_vf_link_state(struct ice_vf *vf) sizeof(pfe), NULL); } -/** - * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access - * @vf: VF to remove access to VSI for - */ -static void ice_vf_invalidate_vsi(struct ice_vf *vf) -{ - vf->lan_vsi_idx = ICE_NO_VSI; - vf->lan_vsi_num = ICE_NO_VSI; -} - -/** - * 
ice_vf_vsi_release - invalidate the VF's VSI after freeing it - * @vf: invalidate this VF's VSI after freeing it - */ -static void ice_vf_vsi_release(struct ice_vf *vf) -{ - ice_vsi_release(ice_get_vf_vsi(vf)); - ice_vf_invalidate_vsi(vf); -} - -/** - * ice_vf_adq_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access - * @vf: VF that ADQ VSI is being invalidated on - * @tc: TC used to access channel specific vsi_idx/vsi_num - */ -static void ice_vf_adq_invalidate_vsi(struct ice_vf *vf, u8 tc) -{ - vf->ch[tc].vsi_idx = ICE_NO_VSI; - vf->ch[tc].vsi_num = ICE_NO_VSI; -} - -/** - * ice_vf_adq_vsi_valid - is ADQ VSI valid? - * @vf: VF that ADQ VSI is being validated - * @tc: TC used to access channel specific vsi_idx/vsi_num - * - * vsi_idx must be non-zero, and vsi_idx and vsi_num must not be ICE_NO_VSI - */ -static bool ice_vf_adq_vsi_valid(struct ice_vf *vf, u8 tc) -{ - return (vf->ch[tc].vsi_idx && vf->ch[tc].vsi_idx != ICE_NO_VSI && - vf->ch[tc].vsi_num != ICE_NO_VSI); -} - -/** - * ice_vf_adq_vsi_release - release VF ADQ VSI resources - * @vf: VF that ADQ VSI is being released on - * @tc: TC used to access channel specific VSI - * - * This function stops Tx and Rx queues if specified, disables Tx queues if - * specified, releases VSI resources, and invalidates it - * - */ -static void ice_vf_adq_vsi_release(struct ice_vf *vf, u8 tc) -{ - ice_vsi_release(ice_get_vf_adq_vsi(vf, tc)); - ice_vf_adq_invalidate_vsi(vf, tc); -} - -/** - * ice_vf_adq_cfg_cleanup - invalidate the VF's channel software info - * @vf: VF that ADQ VSI is being released on - * @tc: TC used to access channel specific VSI - * - * This function invalidates software data structures specific to channel - * such as num_qps, tx_rate, etc... This is called from places like: - * when ADQ VSI is released either from rebuild path "ice_vf_adq_release" - * or during rebuild ADQ config if failed to create/setup VF ADQ VSIs - */ -static void ice_vf_adq_cfg_cleanup(struct ice_vf *vf, u8 tc) -{ - vf->ch[tc].num_qps = 0; - vf->ch[tc].offset = 0; - vf->ch[tc].max_tx_rate = 0; - /* since this function is called from places where - * VF ADQ VSI are cleanup from HW, it's OK to clear - * VF ADQ filter_type to be INVALID. - * Remember VF ADQ filter are replayed by VF driver - * as needed - */ - vf->ch[tc].fltr_type = ICE_CHNL_FLTR_TYPE_INVALID; -} - -#ifdef HAVE_TC_SETUP_CLSFLOWER -/** - * ice_del_all_adv_switch_fltr - * @vf: pointer to the VF info - * - * This function deletes all advanced switch filters specific to the VF and - * releases filter memory and updates all book-keeping. 
This function to be - * used when delete channel message is received before deleting channel VSIs - */ -static void ice_del_all_adv_switch_fltr(struct ice_vf *vf) -{ - struct ice_rule_query_data rule; - struct ice_tc_flower_fltr *f; - struct ice_pf *pf = vf->pf; - struct hlist_node *node; - struct device *dev; - int err; - int i; - - dev = ice_pf_to_dev(pf); - hlist_for_each_entry_safe(f, node, &vf->tc_flower_fltr_list, - tc_flower_node) { - if (!f->dest_vsi) - continue; - - /* Deleting TC filter */ - rule.rid = f->rid; - rule.rule_id = f->rule_id; - rule.vsi_handle = f->dest_id; - err = ice_rem_adv_rule_by_id(&pf->hw, &rule); - if (err) { - if (err == ICE_ERR_DOES_NOT_EXIST) - dev_dbg(dev, "VF %d: filter (rule_id %u) for dest VSI %u DOES NOT EXIST in hw table\n", - vf->vf_id, f->rule_id, f->dest_id); - else - dev_err(dev, "VF %d: Failed to delete switch filter for VSI handle %u, err %d\n", - vf->vf_id, f->dest_id, err); - } - - /* book-keeping and update filter type if filter count - * reached zero - */ - f->dest_vsi->num_chnl_fltr--; - hlist_del(&f->tc_flower_node); - devm_kfree(dev, f); - vf->num_dmac_chnl_fltrs--; - } - - /* Reset VF channel filter type to be INVALID */ - for (i = 1; i < vf->num_tc; i++) - vf->ch[i].fltr_type = ICE_CHNL_FLTR_TYPE_INVALID; -} -#endif /* HAVE_TC_SETUP_CLSFLOWER */ - -/** - * ice_vf_adq_release - perform VF ADQ resource cleanup only - * @vf: pointer to the VF structure - * - * Delete all VF ADQ filters, release VF ADQ VSIs, cleanup internal data - * structues which keeps track of per TC infor including TC0. This function - * is invoked only when VFLR based VF Reset. - */ -static void ice_vf_adq_release(struct ice_vf *vf) -{ - u8 tc; - - /* no ADQ configured, nothing to do */ - if (!ice_is_vf_adq_ena(vf)) - return; - -#ifdef HAVE_TC_SETUP_CLSFLOWER - /* release VF ADQ specific filters and eventually VF driver - * will trigger replay of VF ADQ filters as needed, just like - * other MAC, VLAN filters - */ - ice_del_all_adv_switch_fltr(vf); -#endif - - for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { - if (!ice_vf_adq_vsi_valid(vf, tc)) - continue; - /* Tx queues are disabled before VF reset is scheduled as part - * of VFLR flow. Disabling TX queues again causes error - * such as EINVAL from admin command because underlying - * scheduler configs are cleared as part of disabling once - */ - if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) - ice_vf_adq_vsi_stop_rings(vf, tc); - ice_vf_adq_vsi_release(vf, tc); - /* clear per TC info to avoid stale information such as - * num_qps, tx_rate, etc... 
- */ - ice_vf_adq_cfg_cleanup(vf, tc); - } - - /* to avoid rebuilding of VF ADQ VSIs by mistake */ - vf->adq_enabled = false; - vf->num_tc = 0; - - /* main VF VSI should be built with default, hence clear related - * data structures otherwise vf->ch[0].num_qps and tx_rate will - * still have stale information as stored from "add channel" - * virtchnl message - */ - ice_vf_adq_cfg_cleanup(vf, 0); -} - -/** - * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access - * @vf: VF that control VSI is being invalidated on - */ -static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf) -{ - vf->ctrl_vsi_idx = ICE_NO_VSI; -} - -/** - * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it - * @vf: VF that control VSI is being released on - */ -static void ice_vf_ctrl_vsi_release(struct ice_vf *vf) -{ - ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]); - ice_vf_ctrl_invalidate_vsi(vf); -} - -/** - * ice_free_vf_res - Free a VF's resources - * @vf: pointer to the VF info - */ -static void ice_free_vf_res(struct ice_vf *vf) -{ - struct ice_pf *pf = vf->pf; - int i, last_vector_idx; - - /* First, disable VF's configuration API to prevent OS from - * accessing the VF's VSI after it's freed or invalidated. - */ - clear_bit(ICE_VF_STATE_INIT, vf->vf_states); - ice_vf_fdir_exit(vf); - /* free VF control VSI */ - if (vf->ctrl_vsi_idx != ICE_NO_VSI) - ice_vf_ctrl_vsi_release(vf); - - /* free VSI and disconnect it from the parent uplink */ - if (vf->lan_vsi_idx != ICE_NO_VSI) { - ice_vf_vsi_release(vf); - vf->num_mac = 0; - } - - last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1; - - /* clear VF MDD event information */ - memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); - memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); - - ice_vf_adq_release(vf); - - /* Disable interrupts so that VF starts in a known state */ - for (i = vf->first_vector_idx; i <= last_vector_idx; i++) { - wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M); - ice_flush(&pf->hw); - } - /* reset some of the state variables keeping track of the resources */ - clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); - clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); -} - -/** - * ice_dis_vf_mappings - * @vf: pointer to the VF structure - */ -static void ice_dis_vf_mappings(struct ice_vf *vf) -{ - struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - struct device *dev; - int first, last, v; - struct ice_hw *hw; - - hw = &pf->hw; - vsi = ice_get_vf_vsi(vf); - - dev = ice_pf_to_dev(pf); - wr32(hw, VPINT_ALLOC(vf->vf_id), 0); - wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); - - first = vf->first_vector_idx; - last = first + pf->num_msix_per_vf - 1; - for (v = first; v <= last; v++) { - u32 reg; - - reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) & - GLINT_VECT2FUNC_IS_PF_M) | - ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & - GLINT_VECT2FUNC_PF_NUM_M)); - wr32(hw, GLINT_VECT2FUNC(v), reg); - } - - if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) - wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0); - else - dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); - - if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) - wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0); - else - dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); -} - -/** - * ice_sriov_free_msix_res - Reset/free any used MSIX resources - * @pf: pointer to the PF structure - * - * Since no MSIX entries are taken from the pf->irq_tracker then just clear - * the pf->sriov_base_vector. 
- * - * Returns 0 on success, and -EINVAL on error. - */ -static int ice_sriov_free_msix_res(struct ice_pf *pf) -{ - struct ice_res_tracker *res; - - if (!pf) - return -EINVAL; - - res = pf->irq_tracker; - if (!res) - return -EINVAL; - - /* give back irq_tracker resources used */ - WARN_ON(pf->sriov_base_vector < res->num_entries); - - pf->sriov_base_vector = 0; - - return 0; -} - -/** - * ice_set_vf_state_qs_dis - Set VF queues state to disabled - * @vf: pointer to the VF structure - */ -void ice_set_vf_state_qs_dis(struct ice_vf *vf) -{ - /* Clear Rx/Tx enabled queues flag */ - bitmap_zero(vf->txq_ena, ICE_MAX_QS_PER_VF); - bitmap_zero(vf->rxq_ena, ICE_MAX_QS_PER_VF); - clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); -} - -/** - * ice_dis_vf_qs - Disable the VF queues - * @vf: pointer to the VF structure - */ -static void ice_dis_vf_qs(struct ice_vf *vf) -{ - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - - ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); - ice_vsi_stop_all_rx_rings(vsi); - /* Likewise if VF ADQ is enabled, stop Tx and Rx rings of VF ADQ VSI */ - if (ice_is_vf_adq_ena(vf)) { - int tc; - - for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { - if (!ice_vf_adq_vsi_valid(vf, tc)) - continue; - vsi = ice_get_vf_adq_vsi(vf, tc); - ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); - ice_vsi_stop_all_rx_rings(vsi); - } - } - ice_set_vf_state_qs_dis(vf); -} - -/** - * ice_free_vfs - Free all VFs - * @pf: pointer to the PF structure - */ -void ice_free_vfs(struct ice_pf *pf) -{ - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; - unsigned int tmp, i; - - if (!pf->vf) - return; - - ice_eswitch_release(pf); - - while (test_and_set_bit(ICE_VF_DIS, pf->state)) - usleep_range(1000, 2000); - - - /* Disable IOV before freeing resources. This lets any VF drivers - * running in the host get themselves cleaned up before we yank - * the carpet out from underneath their feet. - */ - if (!pci_vfs_assigned(pf->pdev)) - pci_disable_sriov(pf->pdev); - else - dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - - if (ice_dcf_get_state(pf) != ICE_DCF_STATE_OFF) { - ice_rm_all_dcf_sw_rules(pf); - ice_dcf_set_state(pf, ICE_DCF_STATE_OFF); - pf->dcf.vf = NULL; - } - - /* Avoid wait time by stopping all VFs at the same time */ - ice_for_each_vf(pf, i) - if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states)) - ice_dis_vf_qs(&pf->vf[i]); - - tmp = pf->num_alloc_vfs; - pf->num_qps_per_vf = 0; - pf->num_alloc_vfs = 0; - for (i = 0; i < tmp; i++) { - if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { - /* disable VF qp mappings and set VF disable state */ - ice_dis_vf_mappings(&pf->vf[i]); - set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states); - ice_free_vf_res(&pf->vf[i]); - } - } - - if (ice_sriov_free_msix_res(pf)) - dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); - - devm_kfree(dev, pf->vf); - pf->vf = NULL; - - /* This check is for when the driver is unloaded while VFs are - * assigned. Setting the number of VFs to 0 through sysfs is caught - * before this function ever gets called. - */ - if (!pci_vfs_assigned(pf->pdev)) { - unsigned int vf_id; - - /* Acknowledge VFLR for all VFs. Without this, VFs will fail to - * work correctly when SR-IOV gets re-enabled. 
- */ - for (vf_id = 0; vf_id < tmp; vf_id++) { - u32 reg_idx, bit_idx; - - reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; - bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; - wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); - } - } - - /* clear malicious info if the VFs are getting released */ - for (i = 0; i < tmp; i++) - if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i)) - dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i); - - clear_bit(ICE_VF_DIS, pf->state); - clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); -} - -/** - * ice_trigger_vf_reset - Reset a VF on HW - * @vf: pointer to the VF structure - * @is_vflr: true if VFLR was issued, false if not - * @is_pfr: true if the reset was triggered due to a previous PFR - * - * Trigger hardware to start a reset for a particular VF. Expects the caller - * to wait the proper amount of time to allow hardware to reset the VF before - * it cleans up and restores VF functionality. - */ -static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) -{ - struct ice_pf *pf = vf->pf; - u32 reg, reg_idx, bit_idx; - unsigned int vf_abs_id, i; - struct device *dev; - struct ice_hw *hw; - - dev = ice_pf_to_dev(pf); - hw = &pf->hw; - vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; - - /* Inform VF that it is no longer active, as a warning */ - clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); - - /* Disable VF's configuration API during reset. The flag is re-enabled - * when it's safe again to access VF's VSI. - */ - clear_bit(ICE_VF_STATE_INIT, vf->vf_states); - - /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver - * needs to clear them in the case of VFR/VFLR. If this is done for - * PFR, it can mess up VF resets because the VF driver may already - * have started cleanup by the time we get here. - */ - if (!is_pfr) { - wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0); - wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0); - } - - /* In the case of a VFLR, the HW has already reset the VF and we - * just need to clean up, so don't hit the VFRTRIG register. - */ - if (!is_vflr) { - /* reset VF using VPGEN_VFRTRIG reg */ - reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); - reg |= VPGEN_VFRTRIG_VFSWR_M; - wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); - } - /* clear the VFLR bit in GLGEN_VFLRSTAT */ - reg_idx = (vf_abs_id) / 32; - bit_idx = (vf_abs_id) % 32; - wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); - ice_flush(hw); - - wr32(hw, PF_PCI_CIAA, - VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S)); - for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) { - reg = rd32(hw, PF_PCI_CIAD); - /* no transactions pending so stop polling */ - if ((reg & VF_TRANS_PENDING_M) == 0) - break; - - dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id); - udelay(ICE_PCI_CIAD_WAIT_DELAY_US); - } -} - -/** - * ice_vf_vsi_setup - Set up a VF VSI - * @vf: VF to setup VSI for - * - * Returns pointer to the successfully allocated VSI struct on success, - * otherwise returns NULL on failure. 
- */ -static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) -{ - struct ice_port_info *pi = ice_vf_get_port_info(vf); - struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - - vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL, 0); - - if (!vsi) { - dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); - ice_vf_invalidate_vsi(vf); - return NULL; - } - - vf->lan_vsi_idx = vsi->idx; - vf->lan_vsi_num = vsi->vsi_num; - - return vsi; -} - -/** - * ice_vf_adq_vsi_setup - Set up a VF channel VSI - * @vf: VF to setup VSI for - * @tc: TC to setup the channel VSI for - */ -static struct ice_vsi *ice_vf_adq_vsi_setup(struct ice_vf *vf, u8 tc) -{ - struct ice_port_info *pi = ice_vf_get_port_info(vf); - struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - - vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL, tc); - if (!vsi) { - dev_err(ice_pf_to_dev(pf), "Failed to create VF ADQ VSI for TC %d\n", - tc); - ice_vf_adq_invalidate_vsi(vf, tc); - return NULL; - } - - vf->ch[tc].vsi_idx = vsi->idx; - vf->ch[tc].vsi_num = vsi->vsi_num; - - return vsi; -} - -/** - * ice_vf_ctrl_vsi_setup - Set up a VF control VSI - * @vf: VF to setup control VSI for - * - * Returns pointer to the successfully allocated VSI struct on success, - * otherwise returns NULL on failure. - */ -struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) -{ - struct ice_port_info *pi = ice_vf_get_port_info(vf); - struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - - vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL, 0); - - if (!vsi) { - dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n"); - ice_vf_ctrl_invalidate_vsi(vf); - } - - return vsi; -} - -/** - * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space - * @pf: pointer to PF structure - * @vf: pointer to VF that the first MSIX vector index is being calculated for - * - * This returns the first MSIX vector index in PF space that is used by this VF. - * This index is used when accessing PF relative registers such as - * GLINT_VECT2FUNC and GLINT_DYN_CTL. - * This will always be the OICR index in the AVF driver so any functionality - * using vf->first_vector_idx for queue configuration will have to increment by - * 1 to avoid meddling with the OICR index. - */ -static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) -{ - return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf; -} - -/** - * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration - * @vf: VF to re-apply the configuration for - * - * Called after a VF VSI has been re-added/rebuild during reset. The PF driver - * needs to re-apply the host configured Tx rate limiting configuration. 
- */ -static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) -{ - struct device *dev = ice_pf_to_dev(vf->pf); - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - int err; - - if (vf->min_tx_rate) { - err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); - if (err) { - dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n", - vf->min_tx_rate, vf->vf_id, err); - return err; - } - } - - if (vf->max_tx_rate) { - err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); - if (err) { - dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n", - vf->max_tx_rate, vf->vf_id, err); - return err; - } - } - - return 0; -} - -static u16 ice_vf_get_port_vlan_id(struct ice_vf *vf) -{ - return vf->port_vlan_info.vid; -} - -static u8 ice_vf_get_port_vlan_prio(struct ice_vf *vf) -{ - return vf->port_vlan_info.prio; -} - -bool ice_vf_is_port_vlan_ena(struct ice_vf *vf) -{ - return (ice_vf_get_port_vlan_id(vf) || ice_vf_get_port_vlan_prio(vf)); -} - -static u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf) -{ - return vf->port_vlan_info.tpid; -} - -/** - * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN - * @vf: VF to add MAC filters for - * @vsi: Pointer to VSI - * - * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver - * always re-adds either a VLAN 0 or port VLAN based filter after reset. - */ -static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) -{ - struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); - struct device *dev = ice_pf_to_dev(vf->pf); - int err; - - if (ice_vf_is_port_vlan_ena(vf)) { - err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info); - if (err) { - dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n", - vf->vf_id, err); - return err; - } - - err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); - } else { - err = ice_vsi_add_vlan_zero(vsi); - } - - if (err) { - dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n", - ice_vf_is_port_vlan_ena(vf) ? 
- ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err); - return err; - } - - err = vlan_ops->ena_rx_filtering(vsi); - if (err) { - dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n", - vf->vf_id, vsi->idx, err); - } - - return 0; -} - -/** - * ice_vf_rebuild_dcf_vlan_cfg - Config DCF outer VLAN for VF - * @vf: VF to add outer VLAN for - * @vsi: Pointer to VSI - */ -static int ice_vf_rebuild_dcf_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) -{ - struct ice_dcf_vlan_info *dcf_vlan = &vf->dcf_vlan_info; - struct device *dev = ice_pf_to_dev(vf->pf); - int err; - - if (!ice_is_dcf_enabled(vf->pf) || !dcf_vlan->applying) { - memset(dcf_vlan, 0, sizeof(*dcf_vlan)); - return 0; - } - - dcf_vlan->applying = 0; - - if (dcf_vlan->outer_port_vlan.vid) { - err = ice_vf_vsi_dcf_set_outer_port_vlan(vsi, &dcf_vlan->outer_port_vlan); - if (err) { - dev_err(dev, "failed to configure outer port VLAN via DCF for VF %u, error %d\n", - vf->vf_id, err); - return err; - } - } - - if (dcf_vlan->outer_stripping_ena) { - err = ice_vf_vsi_dcf_ena_outer_vlan_stripping(vsi, dcf_vlan->outer_stripping_tpid); - if (err) { - dev_err(dev, "failed to enable outer VLAN stripping via DCF for VF %u, error %d\n", - vf->vf_id, err); - return err; - } - } - - return 0; -} - -static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable) -{ - struct ice_vsi_ctx *ctx; - enum ice_status status; - int err = 0; - - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return -ENOMEM; - - ctx->info.sec_flags = vsi->info.sec_flags; - ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); - - if (enable) - ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; - else - ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; - - status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %s\n", - enable ? 
"ON" : "OFF", vsi->vsi_num, - ice_stat_str(status)); - err = ice_status_to_errno(status); - } else { - vsi->info.sec_flags = ctx->info.sec_flags; - } - - kfree(ctx); - - return err; -} - -/** - * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI - * @vsi: VSI to enable Tx spoof checking for - */ -static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi) -{ - struct ice_vsi_vlan_ops *vlan_ops; - int err; - - vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); - - err = vlan_ops->ena_tx_filtering(vsi); - if (err) - return err; - - err = ice_cfg_mac_antispoof(vsi, true); - if (err) - return err; - - return 0; -} - -/** - * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI - * @vsi: VSI to disable Tx spoof checking for - */ -static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi) -{ - struct ice_vsi_vlan_ops *vlan_ops; - int err; - - vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); - - err = vlan_ops->dis_tx_filtering(vsi); - if (err) - return err; - - err = ice_cfg_mac_antispoof(vsi, false); - if (err) - return err; - - return 0; -} - -/** - * ice_vf_set_spoofchk_cfg - apply Tx spoof checking setting - * @vf: VF set spoofchk for - * @vsi: VSI associated to the VF - */ -static int -ice_vf_set_spoofchk_cfg(struct ice_vf *vf, struct ice_vsi *vsi) -{ - int err; - - if (vf->spoofchk) - err = ice_vsi_ena_spoofchk(vsi); - else - err = ice_vsi_dis_spoofchk(vsi); - - return err; -} - -/** - * ice_vf_rebuild_adq_port_vlan_cfg - set the port VLAN for VF ADQ VSIs - * @vf: VF to add MAC filters for - * - * Called after a VF ADQ VSI has been re-added/rebuilt during reset. - */ -static int ice_vf_rebuild_adq_port_vlan_cfg(struct ice_vf *vf) -{ - struct device *dev = ice_pf_to_dev(vf->pf); - int err, tc; - - for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { - struct ice_vsi *vsi; - - if (!ice_vf_adq_vsi_valid(vf, tc)) - continue; - - vsi = ice_get_vf_adq_vsi(vf, tc); - err = ice_vf_rebuild_host_vlan_cfg(vf, vsi); - if (err) { - dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, ADQ VSI(num %u), error %d\n", - vf->vf_id, vsi->vsi_num, err); - return err; - } - } - return 0; -} - -/** - * ice_vf_rebuild_adq_spoofchk_cfg - set the spoofchk config for VF ADQ VSIs - * @vf: VF to set spoofchk for - * - * Called after a VF ADQ VSI has been re-added/rebuilt during reset. - */ -static int ice_vf_rebuild_adq_spoofchk_cfg(struct ice_vf *vf) -{ - struct device *dev = ice_pf_to_dev(vf->pf); - int err, tc; - - for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { - struct ice_vsi *vsi; - - if (!ice_vf_adq_vsi_valid(vf, tc)) - continue; - - vsi = ice_get_vf_adq_vsi(vf, tc); - err = ice_vf_set_spoofchk_cfg(vf, vsi); - if (err) { - dev_err(dev, "failed to configure spoofchk via VSI parameters for VF %u, ADQ VSI(num %u), error %d\n", - vf->vf_id, vsi->vsi_num, err); - return err; - } - } - return 0; -} - -/** - * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA - * @vf: VF to add MAC filters for - * - * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver - * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. 
- */ -static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) -{ - struct device *dev = ice_pf_to_dev(vf->pf); - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - enum ice_status status; - u8 broadcast[ETH_ALEN]; - - if (ice_is_eswitch_mode_switchdev(vf->pf)) - return 0; - - eth_broadcast_addr(broadcast); - status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); - if (status) { - dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n", - vf->vf_id, ice_stat_str(status)); - return ice_status_to_errno(status); - } - - vf->num_mac++; - - if (is_valid_ether_addr(vf->hw_lan_addr.addr)) { - status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr, - ICE_FWD_TO_VSI); - if (status) { - dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n", - &vf->hw_lan_addr.addr[0], vf->vf_id, - ice_stat_str(status)); - return ice_status_to_errno(status); - } - vf->num_mac++; - - ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr); - } - - return 0; -} - -/** - * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value - * @vf: VF to configure trust setting for - */ -static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) -{ - if (vf->trusted) - set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); - else - clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); -} - -/** - * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware - * @vf: VF to enable MSIX mappings for - * - * Some of the registers need to be indexed/configured using hardware global - * device values and other registers need 0-based values, which represent PF - * based values. - */ -static void ice_ena_vf_msix_mappings(struct ice_vf *vf) -{ - int device_based_first_msix, device_based_last_msix; - int pf_based_first_msix, pf_based_last_msix, v; - struct ice_pf *pf = vf->pf; - int device_based_vf_id; - struct ice_hw *hw; - u32 reg; - - hw = &pf->hw; - - pf_based_first_msix = vf->first_vector_idx; - pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1; - - device_based_first_msix = pf_based_first_msix + - pf->hw.func_caps.common_cap.msix_vector_first_id; - device_based_last_msix = - (device_based_first_msix + pf->num_msix_per_vf) - 1; - device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - - reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) & - VPINT_ALLOC_FIRST_M) | - ((device_based_last_msix << VPINT_ALLOC_LAST_S) & - VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M); - wr32(hw, VPINT_ALLOC(vf->vf_id), reg); - - reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S) - & VPINT_ALLOC_PCI_FIRST_M) | - ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) & - VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M); - wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg); - - /* map the interrupts to its functions */ - for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) { - reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) & - GLINT_VECT2FUNC_VF_NUM_M) | - ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & - GLINT_VECT2FUNC_PF_NUM_M)); - wr32(hw, GLINT_VECT2FUNC(v), reg); - } - - /* Map mailbox interrupt to VF VSI VF MSI-X vector 0 */ - wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M); -} - -/** - * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF - * @vf: VF to enable the mappings for - * @max_txq: max Tx queues allowed on the VF's VSI - * @max_rxq: max Rx queues allowed on the VF's VSI - */ -static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) -{ - struct device *dev = ice_pf_to_dev(vf->pf); - struct 
ice_vsi *vsi = ice_get_vf_vsi(vf); - struct ice_hw *hw = &vf->pf->hw; - u32 reg; - - /* set regardless of mapping mode */ - wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M); - - /* VF Tx queues allocation */ - if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) { - /* set the VF PF Tx queue range - * VFNUMQ value should be set to (number of queues - 1). A value - * of 0 means 1 queue and a value of 255 means 256 queues - */ - reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) & - VPLAN_TX_QBASE_VFFIRSTQ_M) | - (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) & - VPLAN_TX_QBASE_VFNUMQ_M)); - wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg); - } else { - dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); - } - - /* set regardless of mapping mode */ - wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M); - - /* VF Rx queues allocation */ - if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) { - /* set the VF PF Rx queue range - * VFNUMQ value should be set to (number of queues - 1). A value - * of 0 means 1 queue and a value of 255 means 256 queues - */ - reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) & - VPLAN_RX_QBASE_VFFIRSTQ_M) | - (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) & - VPLAN_RX_QBASE_VFNUMQ_M)); - wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg); - } else { - dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); - } -} - -/** - * ice_ena_vf_mappings - enable VF MSIX and queue mapping - * @vf: pointer to the VF structure - */ -static void ice_ena_vf_mappings(struct ice_vf *vf) -{ - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - u16 max_txq, max_rxq; - - ice_ena_vf_msix_mappings(vf); - - if (ice_is_vf_adq_ena(vf)) { - u16 offset, num_qps; - - offset = vf->ch[vf->num_tc - 1].offset; - num_qps = vf->ch[vf->num_tc - 1].num_qps; - max_txq = offset + num_qps; - max_rxq = offset + num_qps; - } else { - max_txq = vsi->alloc_txq; - max_rxq = vsi->alloc_rxq; - } - - ice_ena_vf_q_mappings(vf, max_txq, max_rxq); -} - -/** - * ice_determine_res - * @pf: pointer to the PF structure - * @avail_res: available resources in the PF structure - * @max_res: maximum resources that can be given per VF - * @min_res: minimum resources that can be given per VF - * - * Returns non-zero value if resources (queues/vectors) are available or - * returns zero if PF cannot accommodate for all num_alloc_vfs. - */ -static int -ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res) -{ - bool checked_min_res = false; - int res; - - /* start by checking if PF can assign max number of resources for - * all num_alloc_vfs. 
- * if yes, return number per VF - * If no, divide by 2 and roundup, check again - * repeat the loop till we reach a point where even minimum resources - * are not available, in that case return 0 - */ - res = max_res; - while ((res >= min_res) && !checked_min_res) { - int num_all_res; - - num_all_res = pf->num_alloc_vfs * res; - if (num_all_res <= avail_res) - return res; - - if (res == min_res) - checked_min_res = true; - - res = DIV_ROUND_UP(res, 2); - } - return 0; -} - -/** - * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space - * @vf: VF to calculate the register index for - * @q_vector: a q_vector associated to the VF - * @tc: Traffic class number for VF ADQ - */ -int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector, - u8 __maybe_unused tc) -{ - struct ice_pf *pf; - u32 reg_idx; - - if (!vf || !q_vector) - return -EINVAL; - - pf = vf->pf; - /* always add one to account for the OICR being the first MSIX */ - reg_idx = pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id + - q_vector->v_idx + 1; - - if (tc && ice_is_vf_adq_ena(vf)) - return reg_idx + vf->ch[tc].offset; - else - return reg_idx; -} - -/** - * ice_get_max_valid_res_idx - Get the max valid resource index - * @res: pointer to the resource to find the max valid index for - * - * Start from the end of the ice_res_tracker and return right when we find the - * first res->list entry with the ICE_RES_VALID_BIT set. This function is only - * valid for SR-IOV because it is the only consumer that manipulates the - * res->end and this is always called when res->end is set to res->num_entries. - */ -static int ice_get_max_valid_res_idx(struct ice_res_tracker *res) -{ - int i; - - if (!res) - return -EINVAL; - - for (i = res->num_entries - 1; i >= 0; i--) - if (res->list[i] & ICE_RES_VALID_BIT) - return i; - - return 0; -} - -/** - * ice_sriov_set_msix_res - Set any used MSIX resources - * @pf: pointer to PF structure - * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs - * - * This function allows SR-IOV resources to be taken from the end of the PF's - * allowed HW MSIX vectors so that the irq_tracker will not be affected. We - * just set the pf->sriov_base_vector and return success. - * - * If there are not enough resources available, return an error. This should - * always be caught by ice_set_per_vf_res(). - * - * Return 0 on success, and -EINVAL when there are not enough MSIX vectors - * in the PF's space available for SR-IOV. - */ -static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) -{ - u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; - int vectors_used = pf->irq_tracker->num_entries; - int sriov_base_vector; - - sriov_base_vector = total_vectors - num_msix_needed; - - /* make sure we only grab irq_tracker entries from the list end and - * that we have enough available MSIX vectors - */ - if (sriov_base_vector < vectors_used) - return -EINVAL; - - pf->sriov_base_vector = sriov_base_vector; - - return 0; -} - -/** - * ice_set_per_vf_res - check if vectors and queues are available - * @pf: pointer to the PF structure - * - * First, determine HW interrupts from common pool. If we allocate fewer VFs, we - * get more vectors and can enable more queues per VF. Note that this does not - * grab any vectors from the SW pool already allocated. Also note, that all - * vector counts include one for each VF's miscellaneous interrupt vector - * (i.e. OICR). 
- * - * Minimum VFs - 2 vectors, 1 queue pair - * Small VFs - 5 vectors, 4 queue pairs - * Medium VFs - 17 vectors, 16 queue pairs - * - * While more vectors can be assigned to a VF, the RSS LUT - * is only 4 bits wide, so we can only do 16 queues of RSS - * per VF. - * - * ADQ sizes: - * Small ADQ VFs - 5 vectors, 4 TCs, 16 queue pairs (4 queue pairs/int) - * Medium ADQ VFs - 17 vectors, 4 TCs, 16 queue pairs (1 queue pairs/int) - * - * Second, determine number of queue pairs per VF by starting with a pre-defined - * maximum each VF supports. If this is not possible, then we adjust based on - * queue pairs available on the device. - * - * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used - * by each VF during VF initialization and reset. - */ -static int ice_set_per_vf_res(struct ice_pf *pf) -{ - int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); - int msix_avail_per_vf, msix_avail_for_sriov; - struct device *dev = ice_pf_to_dev(pf); - u16 num_msix_per_vf, num_txq, num_rxq; - - if (!pf->num_alloc_vfs || max_valid_res_idx < 0) - return -EINVAL; - - /* determine MSI-X resources per VF */ - msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - - pf->irq_tracker->num_entries; - msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs; - if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MAX) { - num_msix_per_vf = ICE_NUM_VF_MSIX_MAX; - } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_LARGE) { - num_msix_per_vf = ICE_NUM_VF_MSIX_LARGE; - } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) { - num_msix_per_vf = ICE_NUM_VF_MSIX_MED; - } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) { - num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL; - } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) { - num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN; - } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) { - num_msix_per_vf = ICE_MIN_INTR_PER_VF; - } else { - dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n", - msix_avail_for_sriov, ICE_MIN_INTR_PER_VF, - pf->num_alloc_vfs); - return -EIO; - } - - /* determine queue resources per VF */ - num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf), - min_t(u16, - num_msix_per_vf - ICE_NONQ_VECS_VF, - ICE_MAX_DFLT_QS_PER_VF), - ICE_MIN_QS_PER_VF); - - num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf), - min_t(u16, - num_msix_per_vf - ICE_NONQ_VECS_VF, - ICE_MAX_DFLT_QS_PER_VF), - ICE_MIN_QS_PER_VF); - - if (!num_txq || !num_rxq) { - dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n", - ICE_MIN_QS_PER_VF, pf->num_alloc_vfs); - return -EIO; - } - - if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) { - dev_err(dev, "Unable to set MSI-X resources for %d VFs\n", - pf->num_alloc_vfs); - return -EINVAL; - } - - /* only allow equal Tx/Rx queue count (i.e. 
queue pairs) */ - pf->num_qps_per_vf = min_t(int, num_txq, num_rxq); - pf->num_msix_per_vf = num_msix_per_vf; - dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n", - pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf); - - return 0; -} - -/** - * ice_clear_vf_reset_trigger - enable VF to access hardware - * @vf: VF to enabled hardware access for - */ -static void ice_clear_vf_reset_trigger(struct ice_vf *vf) -{ - struct ice_hw *hw = &vf->pf->hw; - u32 reg; - - reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); - reg &= ~VPGEN_VFRTRIG_VFSWR_M; - wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); - ice_flush(hw); -} - -static int ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) -{ - struct ice_hw *hw = &vsi->back->hw; - u8 lport = vsi->port_info->lport; - enum ice_status status; - - if (ice_vf_is_port_vlan_ena(vf)) - status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, - ice_vf_get_port_vlan_id(vf), - lport); - else if (ice_vsi_has_non_zero_vlans(vsi)) - status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m); - else - status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0, lport); - - if (status && status != ICE_ERR_ALREADY_EXISTS) { - dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %s\n", - vf->vf_id, ice_stat_str(status)); - return ice_status_to_errno(status); - } - - return 0; -} - -static int ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) -{ - struct ice_hw *hw = &vsi->back->hw; - u8 lport = vsi->port_info->lport; - enum ice_status status; - - if (ice_vf_is_port_vlan_ena(vf)) - status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, - ice_vf_get_port_vlan_id(vf), - lport); - else if (ice_vsi_has_non_zero_vlans(vsi)) - status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m); - else - status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0, lport); - - if (status && status != ICE_ERR_DOES_NOT_EXIST) { - dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %s\n", - vf->vf_id, ice_stat_str(status)); - return ice_status_to_errno(status); - } - - return 0; -} - -static void ice_vf_clear_counters(struct ice_vf *vf) -{ - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - - vf->num_mac = 0; - vsi->num_vlan = 0; - memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); - memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); -} - -/** - * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild - * @vf: VF to perfrom pre VSI rebuild tasks - * - * These tasks are items that don't need to be amortized since they are most - * likely called in a for loop with all VF(s) in the reset_all_vfs() case. 
- */ -static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) -{ - /* Remove switch rules associated with the reset VF */ - ice_rm_dcf_sw_vsi_rule(vf->pf, vf->lan_vsi_num); - - if (ice_is_vf_dcf(vf)) { - if (vf->pf->hw.dcf_caps & DCF_ACL_CAP) - ice_acl_destroy_tbl(&vf->pf->hw); - ice_clear_dcf_udp_tunnel_cfg(vf->pf); - } - - ice_vf_clear_counters(vf); - ice_clear_vf_reset_trigger(vf); -} - -/** - * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config - * @vsi: Pointer to VSI - * - * This function moves VSI into corresponding scheduler aggregator node - * based on cached value of "aggregator node info" per VSI - */ -static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - enum ice_status status; - struct device *dev; - - if (!vsi->agg_node) - return; - - dev = ice_pf_to_dev(pf); - if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { - dev_dbg(dev, - "agg_id %u already has reached max_num_vsis %u\n", - vsi->agg_node->agg_id, vsi->agg_node->num_vsis); - return; - } - - status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, - vsi->idx, (u8)vsi->tc_cfg.ena_tc); - if (status) - dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node", - vsi->idx, vsi->agg_node->agg_id); - else - vsi->agg_node->num_vsis++; -} - -/** - * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset - * @vf: VF to rebuild host configuration on - */ -static void ice_vf_rebuild_host_cfg(struct ice_vf *vf) -{ - struct device *dev = ice_pf_to_dev(vf->pf); - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - - ice_vf_set_host_trust_cfg(vf); - - if (ice_vf_rebuild_host_mac_cfg(vf)) - dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n", - vf->vf_id); - - if (ice_vf_rebuild_dcf_vlan_cfg(vf, vsi)) - dev_err(dev, "failed to rebuild DCF VLAN configuration for VF %u\n", - vf->vf_id); - - if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) - dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", - vf->vf_id); - - if (ice_vf_rebuild_host_tx_rate_cfg(vf)) - dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", - vf->vf_id); - - if (ice_vf_set_spoofchk_cfg(vf, vsi)) - dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n", - vf->vf_id); - - /* rebuild aggregator node config for main VF VSI */ - ice_vf_rebuild_aggregator_node_cfg(vsi); -} - -/** - * ice_vf_rebuild_adq_aggregator_node - move ADQ VSIs into aggregator node - * @vf: VF to rebuild ADQ VSI(s) Tx rate configuration on - * - * If VF ADQ is enabled, replay scheduler aggregator node config - */ -static void ice_vf_rebuild_adq_aggregator_node(struct ice_vf *vf) -{ - int tc; - - if (!ice_is_vf_adq_ena(vf)) - return; - - for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { - struct ice_vsi *vsi; - - if (!ice_vf_adq_vsi_valid(vf, tc)) - continue; - vsi = ice_get_vf_adq_vsi(vf, tc); - ice_vf_rebuild_aggregator_node_cfg(vsi); - } -} - -/** - * ice_vf_rebuild_adq_tx_rate_cfg - rebuild ADQ VSI(s) Tx rate configuration - * @vf: VF to rebuild ADQ VSI(s) Tx rate configuration on - */ -static void ice_vf_rebuild_adq_tx_rate_cfg(struct ice_vf *vf) -{ - struct device *dev = ice_pf_to_dev(vf->pf); - struct ice_vsi *vsi; - u64 max_tx_rate; - u8 tc; - - if (!ice_is_vf_adq_ena(vf)) - return; - - /* Host may have set Tx rate for VF, but use the TC0's specified - * max Tx rate for main VF VSI. 
- * Iterate thru' all VSI (hence for loop starts with zero) shared by - * given VF and set the BW limit if specified as part of - * VF ADQ TC config - */ - for (tc = 0; tc < vf->num_tc; tc++) { - if (!ice_vf_adq_vsi_valid(vf, tc)) - continue; - - max_tx_rate = vf->ch[tc].max_tx_rate; - if (!max_tx_rate) - continue; - - if (!tc && vf->max_tx_rate) - dev_dbg(dev, "Host managed VF rate limit %u for VF %d are being changed to %llu\n", - vf->max_tx_rate, vf->vf_id, max_tx_rate); - - vsi = ice_get_vf_adq_vsi(vf, tc); - if (ice_set_max_bw_limit(vsi, max_tx_rate * 1000)) - dev_err(dev, "Unable to set Tx rate %llu in Mbps for VF %u TC %d\n", - max_tx_rate, vf->vf_id, tc); - } -} - -/** - * ice_vf_rebuild_adq_host_cfg - host admin config is persistent across reset - * @vf: VF to rebuild ADQ host configuration on - */ -static void ice_vf_rebuild_adq_host_cfg(struct ice_vf *vf) -{ - struct device *dev = ice_pf_to_dev(vf->pf); - - ice_vf_rebuild_adq_aggregator_node(vf); - ice_vf_rebuild_adq_tx_rate_cfg(vf); - if (ice_vf_rebuild_adq_port_vlan_cfg(vf)) - dev_err(dev, "failed to rebuild port VLAN configuration for ADQ enabled VF %u\n", - vf->vf_id); - if (ice_vf_rebuild_adq_spoofchk_cfg(vf)) - dev_err(dev, "failed to rebuild spoofchk configuration for ADQ enabled VF %u\n", - vf->vf_id); -} - -/** - * ice_vf_rebuild_adq_vsi_with_release - release and setup each ADQ VSI - * @vf: VF to re-apply ADQ configuration for - * - * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF - * configuration change, etc.). - * - * This cannot be called for the reset all VFs case as ice_vf_adq_vsi_release() - * will fail because there are no VF VSI(s) in firmware at this point. - */ -static int ice_vf_rebuild_adq_vsi_with_release(struct ice_vf *vf) -{ - u8 tc; - - if (!ice_is_vf_adq_ena(vf)) - return 0; - - for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { - if (ice_vf_adq_vsi_valid(vf, tc)) { - ice_vf_adq_vsi_stop_rings(vf, tc); - ice_vf_adq_vsi_disable_txqs(vf, tc); - ice_vf_adq_vsi_release(vf, tc); - } - - if (!ice_vf_adq_vsi_setup(vf, tc)) { - dev_err(ice_pf_to_dev(vf->pf), "failed to setup ADQ VSI for VF %u, TC %d, disabling VF ADQ VSI\n", - vf->vf_id, tc); - goto adq_cfg_failed; - } - } - - /* must to store away TC0's info because it is used later */ - vf->ch[0].vsi_idx = vf->lan_vsi_idx; - vf->ch[0].vsi_num = vf->lan_vsi_num; - - return 0; - -adq_cfg_failed: - /* perform VSI release for ADQ VSI if some of them were - * created successfully. 
- */ - for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { - if (ice_vf_adq_vsi_valid(vf, tc)) { - ice_vf_adq_vsi_disable_txqs(vf, tc); - ice_vf_adq_vsi_release(vf, tc); - } - ice_vf_adq_cfg_cleanup(vf, tc); - } - vf->adq_enabled = false; - vf->num_tc = 0; - /* Upon failure also clean up tc=0 specific info from - * software data structs, to avoid having stale info - */ - ice_vf_adq_invalidate_vsi(vf, 0); - ice_vf_adq_cfg_cleanup(vf, 0); - return -ENOMEM; -} - -/** - * ice_vf_rebuild_adq_vsi - rebuild ADQ VSI(s) on the VF - * @vf: VF to rebuild ADQ VSI(s) on - */ -static int ice_vf_rebuild_adq_vsi(struct ice_vf *vf) -{ - struct ice_pf *pf = vf->pf; - int tc; - - /* no ADQ configured, nothing to do */ - if (!ice_is_vf_adq_ena(vf)) - return 0; - - for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { - struct ice_vsi *vsi; - int ret; - - if (!ice_vf_adq_vsi_valid(vf, tc)) - continue; - - vsi = ice_get_vf_adq_vsi(vf, tc); - ret = ice_vsi_rebuild(vsi, true); - if (ret) { - dev_err(ice_pf_to_dev(pf), "failed to rebuild ADQ VSI for VF %u, disabling VF ADQ VSI\n", - vf->vf_id); - vf->adq_enabled = false; - ice_vf_adq_invalidate_vsi(vf, tc); - return ret; - } - - vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); - vf->ch[tc].vsi_num = vsi->vsi_num; - vf->ch[tc].vsi_idx = vsi->idx; - } - - /* must to store away TC0's info because it is use later */ - vf->ch[0].vsi_idx = vf->lan_vsi_idx; - vf->ch[0].vsi_num = vf->lan_vsi_num; - - return 0; -} - -/** - * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI - * @vf: VF to release and setup the VSI for - * - * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF - * configuration change, etc.). - */ -static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf) -{ - ice_vf_vsi_release(vf); - if (!ice_vf_vsi_setup(vf)) - return -ENOMEM; - - ice_vf_rebuild_adq_vsi_with_release(vf); - - return 0; -} - -/** - * ice_vf_rebuild_vsi - rebuild the VF's VSI - * @vf: VF to rebuild the VSI for - * - * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the - * host, PFR, CORER, etc.). 
- */ -static int ice_vf_rebuild_vsi(struct ice_vf *vf) -{ - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - struct ice_pf *pf = vf->pf; - - if (ice_vsi_rebuild(vsi, true)) { - dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n", - vf->vf_id); - return -EIO; - } - /* vsi->idx will remain the same in this case so don't update - * vf->lan_vsi_idx - */ - vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); - vf->lan_vsi_num = vsi->vsi_num; - - if (ice_vf_rebuild_adq_vsi(vf)) { - dev_err(ice_pf_to_dev(pf), "failed to rebuild ADQ configuration for VF %d\n", - vf->vf_id); - return -EIO; - } - - return 0; -} - -/** - * ice_vf_set_initialized - VF is ready for VIRTCHNL communication - * @vf: VF to set in initialized state - * - * After this function the VF will be ready to receive/handle the - * VIRTCHNL_OP_GET_VF_RESOURCES message - */ -static void ice_vf_set_initialized(struct ice_vf *vf) -{ - ice_set_vf_state_qs_dis(vf); - clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); - clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); - clear_bit(ICE_VF_STATE_DIS, vf->vf_states); - set_bit(ICE_VF_STATE_INIT, vf->vf_states); - memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); -} - -/** - * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt - * @vf: VF to perform tasks on - */ -static void ice_vf_post_vsi_rebuild(struct ice_vf *vf) -{ - ice_vf_rebuild_host_cfg(vf); - ice_vf_rebuild_adq_host_cfg(vf); - ice_vf_set_initialized(vf); - ice_ena_vf_mappings(vf); - wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); -} - -/** - * ice_reset_all_vfs - reset all allocated VFs in one go - * @pf: pointer to the PF structure - * @is_vflr: true if VFLR was issued, false if not - * - * First, tell the hardware to reset each VF, then do all the waiting in one - * chunk, and finally finish restoring each VF after the wait. This is useful - * during PF routines which need to reset all VFs, as otherwise it must perform - * these resets in a serialized fashion. - * - * Returns true if any VFs were reset, and false otherwise. - */ -bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) -{ - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; - struct ice_vf *vf; - int v, i; - - /* If we don't have any VFs, then there is nothing to reset */ - if (!pf->num_alloc_vfs) - return false; - - /* clear all malicious info if the VFs are getting reset */ - ice_for_each_vf(pf, i) - if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i)) - dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i); - - /* If VFs have been disabled, there is no need to reset */ - if (test_and_set_bit(ICE_VF_DIS, pf->state)) - return false; - - ice_clear_dcf_acl_cfg(pf); - ice_clear_dcf_udp_tunnel_cfg(pf); - pf->hw.dcf_caps &= ~(DCF_ACL_CAP | DCF_UDP_TUNNEL_CAP); - - /* Begin reset on all VFs at once */ - ice_for_each_vf(pf, v) - ice_trigger_vf_reset(&pf->vf[v], is_vflr, true); - - /* HW requires some time to make sure it can flush the FIFO for a VF - * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in - * sequence to make sure that it has completed. We'll keep track of - * the VFs using a simple iterator that increments once that VF has - * finished resetting. 
- */ - for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { - /* Check each VF in sequence */ - while (v < pf->num_alloc_vfs) { - u32 reg; - - vf = &pf->vf[v]; - reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); - if (!(reg & VPGEN_VFRSTAT_VFRD_M)) { - /* only delay if the check failed */ - usleep_range(10, 20); - break; - } - - /* If the current VF has finished resetting, move on - * to the next VF in sequence. - */ - v++; - } - } - - - /* Display a warning if at least one VF didn't manage to reset in - * time, but continue on with the operation. - */ - if (v < pf->num_alloc_vfs) - dev_warn(dev, "VF reset check timeout\n"); - - - /* free VF resources to begin resetting the VSI state */ - ice_for_each_vf(pf, v) { - vf = &pf->vf[v]; - - vf->driver_caps = 0; - ice_vc_set_default_allowlist(vf); - -#ifdef HAVE_TC_SETUP_CLSFLOWER - /* always release VF ADQ filters since those filters will be - * replayed by VF driver. This is needed to avoid stale - * filters in software internal data structues - */ - ice_del_all_adv_switch_fltr(vf); -#endif - - ice_vf_fdir_exit(vf); - ice_vf_fdir_init(vf); - /* clean VF control VSI when resetting VFs since it should be - * setup only when iAVF creates its first FDIR rule. - */ - if (vf->ctrl_vsi_idx != ICE_NO_VSI) - ice_vf_ctrl_invalidate_vsi(vf); - - ice_vf_pre_vsi_rebuild(vf); - ice_vf_rebuild_vsi(vf); - ice_vf_post_vsi_rebuild(vf); - } - - if (ice_is_eswitch_mode_switchdev(pf)) - if (ice_eswitch_rebuild(pf)) - dev_warn(dev, "eswitch rebuild failed\n"); - - ice_flush(hw); - clear_bit(ICE_VF_DIS, pf->state); - - return true; -} - -/** - * ice_is_vf_disabled - * @vf: pointer to the VF info - * - * Returns true if the PF or VF is disabled, false otherwise. - */ -static bool ice_is_vf_disabled(struct ice_vf *vf) -{ - struct ice_pf *pf = vf->pf; - - /* If the PF has been disabled, there is no need resetting VF until - * PF is active again. Similarly, if the VF has been disabled, this - * means something else is resetting the VF, so we shouldn't continue. - * Otherwise, set disable VF state bit for actual reset, and continue. - */ - return (test_bit(ICE_VF_DIS, pf->state) || - test_bit(ICE_VF_STATE_DIS, vf->vf_states)); -} - -/** - * ice_vf_get_glint_ceqctl_idx - get the GLINT_CEQCTL index relative to the PF - * @vf: VF used to get the index - * @ceq_idx: 0-based index from the VF - * - * Use the VF relative (0-based) CEQ index plus the first PF MSI-X index - * assigned to this VF (relative to the PF's MSIX space) to determine the index - * of the GLINT_CEQCTL register - */ -static u16 ice_vf_get_glint_ceqctl_idx(struct ice_vf *vf, u16 ceq_idx) -{ - return vf->first_vector_idx + ceq_idx; -} - -/** - * ice_vf_clear_ceq_irq_map - clear the CEQ IRQ mapping - * @vf: VF used to clear the mapping - * @ceq_idx: VF relative (0-based) CEQ index - */ -static void ice_vf_clear_ceq_irq_map(struct ice_vf *vf, u16 ceq_idx) -{ - u16 glint_ceqctl_idx = ice_vf_get_glint_ceqctl_idx(vf, ceq_idx); - - wr32(&vf->pf->hw, GLINT_CEQCTL(glint_ceqctl_idx), 0); -} - -/** - * ice_vf_clear_aeq_irq_map - clear the AEQ IRQ mapping - * @vf: VF used to clear the mapping - */ -static void ice_vf_clear_aeq_irq_map(struct ice_vf *vf) -{ - wr32(&vf->pf->hw, VPINT_AEQCTL(vf->vf_id), 0); -} - -/** - * ice_vf_clear_rdma_irq_map - clear the RDMA IRQ mapping - * @vf: VF used to clear the mapping - * - * Clear any RDMA IRQ mapping that a VF might have requested. 
Since the number - * of CEQ indices are never greater than the num_msix_per_vf just clear all CEQ - * indices that are possibly associated to this VF. Also clear the AEQ for this - * VF. Doing it this way prevents the need to cache the configuration received - * on VIRTCHNL_OP_CONFIG_RMDA_IRQ_MAP since VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP is - * designed to clear the entire RDMA IRQ mapping configuration. - */ -static void ice_vf_clear_rdma_irq_map(struct ice_vf *vf) -{ - u16 i; - - for (i = 0; i < vf->pf->num_msix_per_vf; i++) - ice_vf_clear_ceq_irq_map(vf, i); - - ice_vf_clear_aeq_irq_map(vf); -} - -/** - * ice_reset_vf - Reset a particular VF - * @vf: pointer to the VF structure - * @is_vflr: true if VFLR was issued, false if not - * - * Returns true if the VF is currently in reset, resets successfully, or resets - * are disabled and false otherwise. - */ -bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) -{ - struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - struct device *dev; - struct ice_hw *hw; - bool rsd = false; - u8 promisc_m; - u32 reg; - int i; - - dev = ice_pf_to_dev(pf); - - if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { - dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n", - vf->vf_id); - return true; - } - - if (ice_is_vf_disabled(vf)) { - dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", - vf->vf_id); - return true; - } - - /* Set VF disable bit state here, before triggering reset */ - set_bit(ICE_VF_STATE_DIS, vf->vf_states); - ice_trigger_vf_reset(vf, is_vflr, false); - - if (ice_dcf_get_state(pf) == ICE_DCF_STATE_ON) - ice_dcf_set_state(pf, ICE_DCF_STATE_BUSY); - - vsi = ice_get_vf_vsi(vf); - - if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) - ice_dis_vf_qs(vf); - - /* Call Disable LAN Tx queue AQ whether or not queues are - * enabled. This is needed for successful completion of VFR. - */ - ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, - NULL, ICE_VF_RESET, vf->vf_id, NULL); - /* Likewise Disable LAN Tx queues for VF ADQ VSIs */ - if (ice_is_vf_adq_ena(vf)) { - int tc; - - for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { - if (!ice_vf_adq_vsi_valid(vf, tc)) - continue; - ice_dis_vsi_txq(vsi->port_info, vf->ch[tc].vsi_idx, 0, - 0, NULL, NULL, NULL, ICE_VF_RESET, - vf->vf_id, NULL); - } - } - - if (vf->driver_caps & VIRTCHNL_VF_CAP_RDMA) - ice_vf_clear_rdma_irq_map(vf); - - hw = &pf->hw; - /* poll VPGEN_VFRSTAT reg to make sure - * that reset is complete - */ - for (i = 0; i < 10; i++) { - /* VF reset requires driver to first reset the VF and then - * poll the status register to make sure that the reset - * completed successfully. - */ - reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); - if (reg & VPGEN_VFRSTAT_VFRD_M) { - rsd = true; - break; - } - - /* only sleep if the reset is not done */ - usleep_range(10, 20); - } - - vf->driver_caps = 0; - ice_vc_set_default_allowlist(vf); - - /* Display a warning if VF didn't manage to reset in time, but need to - * continue on with the operation. 
- */ - if (!rsd) - dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id); - - /* disable promiscuous modes in case they were enabled - * ignore any error if disabling process failed - */ - if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || - test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { - if (ice_vf_is_port_vlan_ena(vf) || vsi->num_vlan) - promisc_m = ICE_UCAST_VLAN_PROMISC_BITS; - else - promisc_m = ICE_UCAST_PROMISC_BITS; - - if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m)) - dev_err(dev, "disabling promiscuous mode failed\n"); - } - -#ifdef HAVE_TC_SETUP_CLSFLOWER - /* always release VF ADQ filters since those filters will be - * replayed by VF driver. This is needed to avoid stale filters in - * software internal data structures - */ - ice_del_all_adv_switch_fltr(vf); -#endif - /* VF driver gets reloaded on VFLR, so clear ADQ configuration */ - if (is_vflr) - ice_vf_adq_release(vf); - - - ice_vf_fdir_exit(vf); - ice_vf_fdir_init(vf); - /* clean VF control VSI when resetting VF since it should be setup - * only when iAVF creates its first FDIR rule. - */ - if (vf->ctrl_vsi_idx != ICE_NO_VSI) - ice_vf_ctrl_vsi_release(vf); - - ice_vf_pre_vsi_rebuild(vf); - - if (ice_vf_rebuild_vsi_with_release(vf)) { - dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id); - return false; - } - - ice_vf_post_vsi_rebuild(vf); - vsi = ice_get_vf_vsi(vf); - ice_eswitch_update_repr(vsi); - - if (ice_dcf_get_state(pf) == ICE_DCF_STATE_BUSY) { - ice_dcf_set_state(pf, ICE_DCF_STATE_ON); - } - - /* if the VF has been reset allow it to come up again */ - if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id)) - dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i); - - return true; -} - /** * ice_vc_notify_link_state - Inform all VFs on a PF of link status * @pf: pointer to the PF structure */ void ice_vc_notify_link_state(struct ice_pf *pf) { - int i; + struct ice_vf *vf; + unsigned int bkt; - ice_for_each_vf(pf, i) - ice_vc_notify_vf_link_state(&pf->vf[i]); + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) + ice_vc_notify_vf_link_state(vf); + mutex_unlock(&pf->vfs.table_lock); } /** @@ -2530,7 +500,7 @@ void ice_vc_notify_reset(struct ice_pf *pf) { struct virtchnl_pf_event pfe; - if (!pf->num_alloc_vfs) + if (!ice_has_vfs(pf)) return; pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; @@ -2539,508 +509,6 @@ void ice_vc_notify_reset(struct ice_pf *pf) (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); } -/** - * ice_vc_notify_dcf_vf_info - Send DCF VF information to the VF - * @old_dcf_vf: pointer to the previous DCF VF structure - * @cur_dcf_vf: pointer to the current DCF VF structure - */ -static void ice_vc_notify_dcf_vf_info(struct ice_vf *old_dcf_vf, struct ice_vf *cur_dcf_vf) -{ - struct ice_pf *pf = cur_dcf_vf->pf; - struct virtchnl_pf_event pfe = { 0 }; - - if (!old_dcf_vf || !cur_dcf_vf) - return; - - pfe.event = VIRTCHNL_EVENT_DCF_VSI_INFO; - pfe.event_data.vf_vsi_map.vf_id = cur_dcf_vf->vf_id; - pfe.event_data.vf_vsi_map.vsi_id = cur_dcf_vf->lan_vsi_num; - - ice_aq_send_msg_to_vf(&pf->hw, old_dcf_vf->vf_id, VIRTCHNL_OP_EVENT, - VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL); - return; -} - -/** - * ice_vc_notify_vf_reset - Notify VF of a reset event - * @vf: pointer to the VF structure - */ -static void ice_vc_notify_vf_reset(struct ice_vf *vf) -{ - struct virtchnl_pf_event pfe; - struct ice_pf *pf; - - if (!vf) - return; - - pf = vf->pf; - if (ice_validate_vf_id(pf, vf->vf_id)) - return; - - /* Bail out if VF 
is in disabled state, neither initialized, nor active - * state - otherwise proceed with notifications - */ - if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && - !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) || - test_bit(ICE_VF_STATE_DIS, vf->vf_states)) - return; - - pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; - pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; - ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, - VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), - NULL); -} - - -/** - * ice_init_vf_vsi_res - initialize/setup VF VSI resources - * @vf: VF to initialize/setup the VSI for - * - * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the - * VF VSI's broadcast filter and is only used during initial VF creation. - */ -static int ice_init_vf_vsi_res(struct ice_vf *vf) -{ - struct ice_vsi_vlan_ops *vlan_ops; - struct ice_pf *pf = vf->pf; - u8 broadcast[ETH_ALEN]; - enum ice_status status; - struct ice_vsi *vsi; - struct device *dev; - int err; - - vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); - - dev = ice_pf_to_dev(pf); - vsi = ice_vf_vsi_setup(vf); - if (!vsi) - return -ENOMEM; - - err = ice_vsi_add_vlan_zero(vsi); - if (err) { - dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n", - vf->vf_id); - goto release_vsi; - } - - vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); - err = vlan_ops->ena_rx_filtering(vsi); - if (err) { - dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n", - vf->vf_id); - goto release_vsi; - } - - eth_broadcast_addr(broadcast); - status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); - if (status) { - dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n", - vf->vf_id, ice_stat_str(status)); - err = ice_status_to_errno(status); - goto release_vsi; - } - - err = ice_vf_set_spoofchk_cfg(vf, vsi); - if (err) { - dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n", - vf->vf_id); - goto release_vsi; - } - - - vf->num_mac = 1; - - return 0; - -release_vsi: - ice_vf_vsi_release(vf); - return err; -} - -/** - * ice_start_vfs - start VFs so they are ready to be used by SR-IOV - * @pf: PF the VFs are associated with - */ -static int ice_start_vfs(struct ice_pf *pf) -{ - struct ice_hw *hw = &pf->hw; - int retval, i; - - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; - - ice_clear_vf_reset_trigger(vf); - - retval = ice_init_vf_vsi_res(vf); - if (retval) { - dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n", - vf->vf_id, retval); - goto teardown; - } - - set_bit(ICE_VF_STATE_INIT, vf->vf_states); - ice_ena_vf_mappings(vf); - wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); - } - - ice_flush(hw); - return 0; - -teardown: - for (i = i - 1; i >= 0; i--) { - struct ice_vf *vf = &pf->vf[i]; - - ice_dis_vf_mappings(vf); - ice_vf_vsi_release(vf); - } - - return retval; -} - -static void -ice_vf_hash_ctx_init(struct ice_vf *vf) -{ - memset(&vf->hash_ctx, 0, sizeof(vf->hash_ctx)); -} - -/** - * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation - * @pf: PF holding reference to all VFs for default configuration - */ -static void ice_set_dflt_settings_vfs(struct ice_pf *pf) -{ - int i; - - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; - vf->vf_sw_id = pf->first_sw; - vf->pf = pf; - vf->vf_id = i; - /* assign default capabilities */ - set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps); - vf->spoofchk = true; - vf->num_vf_qs = pf->num_qps_per_vf; - ice_vc_set_default_allowlist(vf); - - /* ctrl_vsi_idx 
will be set to a valid value only when iAVF - * creates its first fdir rule. - */ - ice_vf_ctrl_invalidate_vsi(vf); - ice_vf_fdir_init(vf); - - ice_vf_hash_ctx_init(vf); - - ice_vc_set_dflt_vf_ops(&vf->vc_ops); - } -} - -/** - * ice_alloc_vfs - allocate num_vfs in the PF structure - * @pf: PF to store the allocated VFs in - * @num_vfs: number of VFs to allocate - */ -static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs) -{ - struct ice_vf *vfs; - - vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs), - GFP_KERNEL); - if (!vfs) - return -ENOMEM; - - pf->vf = vfs; - pf->num_alloc_vfs = num_vfs; - - return 0; -} - -/** - * ice_ena_vfs - enable VFs so they are ready to be used - * @pf: pointer to the PF structure - * @num_vfs: number of VFs to enable - */ -static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) -{ - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; - int ret; - - /* Disable global interrupt 0 so we don't try to handle the VFLR. */ - wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), - ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); - set_bit(ICE_OICR_INTR_DIS, pf->state); - ice_flush(hw); - - ret = pci_enable_sriov(pf->pdev, num_vfs); - if (ret) { - pf->num_alloc_vfs = 0; - goto err_unroll_intr; - } - - ret = ice_alloc_vfs(pf, num_vfs); - if (ret) - goto err_pci_disable_sriov; - - ice_dcf_init_sw_rule_mgmt(pf); - - if (ice_set_per_vf_res(pf)) { - dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n", - num_vfs); - ret = -ENOSPC; - goto err_unroll_sriov; - } - - ice_set_dflt_settings_vfs(pf); - - if (ice_start_vfs(pf)) { - dev_err(dev, "Failed to start VF(s)\n"); - ret = -EAGAIN; - goto err_unroll_sriov; - } - - clear_bit(ICE_VF_DIS, pf->state); - - if (ice_eswitch_configure(pf)) - goto err_unroll_sriov; - - /* rearm global interrupts */ - if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) - ice_irq_dynamic_ena(hw, NULL, NULL); - - return 0; - -err_unroll_sriov: - devm_kfree(dev, pf->vf); - pf->vf = NULL; - pf->num_alloc_vfs = 0; -err_pci_disable_sriov: - pci_disable_sriov(pf->pdev); -err_unroll_intr: - /* rearm interrupts here */ - ice_irq_dynamic_ena(hw, NULL, NULL); - clear_bit(ICE_OICR_INTR_DIS, pf->state); - return ret; -} - -/** - * ice_pci_sriov_ena - Enable or change number of VFs - * @pf: pointer to the PF structure - * @num_vfs: number of VFs to allocate - * - * Returns 0 on success and negative on failure - */ -static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) -{ - int pre_existing_vfs = pci_num_vf(pf->pdev); - struct device *dev = ice_pf_to_dev(pf); - int err; - - if (pre_existing_vfs && pre_existing_vfs != num_vfs) - ice_free_vfs(pf); - else if (pre_existing_vfs && pre_existing_vfs == num_vfs) - return 0; - - if (num_vfs > pf->num_vfs_supported) { - dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", - num_vfs, pf->num_vfs_supported); - return -EOPNOTSUPP; - } - - dev_info(dev, "Enabling %d VFs\n", num_vfs); - err = ice_ena_vfs(pf, num_vfs); - if (err) { - dev_err(dev, "Failed to enable SR-IOV: %d\n", err); - return err; - } - - set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); - return 0; -} - - -/** - * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks - * @pf: PF to enabled SR-IOV on - */ -static int ice_check_sriov_allowed(struct ice_pf *pf) -{ - struct device *dev = ice_pf_to_dev(pf); - - if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { - dev_err(dev, "This device is not capable of SR-IOV\n"); - return -EOPNOTSUPP; - } - - if (test_bit(ICE_RECOVERY_MODE, pf->state)) { - 
dev_err(dev, "SR-IOV cannot be configured - Device is in Recovery Mode\n"); - return -EOPNOTSUPP; - } - - if (ice_is_safe_mode(pf)) { - dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n"); - return -EOPNOTSUPP; - } - - if (!ice_pf_state_is_nominal(pf)) { - dev_err(dev, "Cannot enable SR-IOV, device not ready\n"); - return -EBUSY; - } - - return 0; -} - -/** - * ice_sriov_configure - Enable or change number of VFs via sysfs - * @pdev: pointer to a pci_dev structure - * @num_vfs: number of VFs to allocate or 0 to free VFs - * - * This function is called when the user updates the number of VFs in sysfs. On - * success return whatever num_vfs was set to by the caller. Return negative on - * failure. - */ -int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) -{ - struct ice_pf *pf = pci_get_drvdata(pdev); - struct device *dev = ice_pf_to_dev(pf); - enum ice_status status; - int err; - - err = ice_check_sriov_allowed(pf); - if (err) - return err; - - if (!num_vfs) { - if (!pci_vfs_assigned(pdev)) { - ice_mbx_deinit_snapshot(&pf->hw); - ice_free_vfs(pf); - return 0; - } - - dev_err(dev, "can't free VFs because some are assigned to VMs.\n"); - return -EBUSY; - } - - status = ice_mbx_init_snapshot(&pf->hw, num_vfs); - if (status) - return ice_status_to_errno(status); - - err = ice_pci_sriov_ena(pf, num_vfs); - if (err) { - ice_mbx_deinit_snapshot(&pf->hw); - return err; - } - - return num_vfs; -} - -/** - * ice_process_vflr_event - Free VF resources via IRQ calls - * @pf: pointer to the PF structure - * - * called from the VFLR IRQ handler to - * free up VF resources and state variables - */ -void ice_process_vflr_event(struct ice_pf *pf) -{ - struct ice_hw *hw = &pf->hw; - unsigned int vf_id; - u32 reg; - - if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) || - !pf->num_alloc_vfs) - return; - - ice_for_each_vf(pf, vf_id) { - struct ice_vf *vf = &pf->vf[vf_id]; - u32 reg_idx, bit_idx; - - reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; - bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; - /* read GLGEN_VFLRSTAT register to find out the flr VFs */ - reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); - if (reg & BIT(bit_idx)) - /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ - ice_reset_vf(vf, true); - } -} - -/** - * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF - * @vf: pointer to the VF info - */ -static void ice_vc_reset_vf(struct ice_vf *vf) -{ - ice_vc_notify_vf_reset(vf); - ice_reset_vf(vf, false); -} - -/** - * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in - * @pf: PF used to index all VFs - * @pfq: queue index relative to the PF's function space - * - * If no VF is found who owns the pfq then return NULL, otherwise return a - * pointer to the VF who owns the pfq - */ -static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) -{ - unsigned int vf_id; - - ice_for_each_vf(pf, vf_id) { - struct ice_vf *vf = &pf->vf[vf_id]; - struct ice_vsi *vsi; - u16 rxq_idx; - - vsi = ice_get_vf_vsi(vf); - - ice_for_each_rxq(vsi, rxq_idx) - if (vsi->rxq_map[rxq_idx] == pfq) - return vf; - } - - return NULL; -} - -/** - * ice_globalq_to_pfq - convert from global queue index to PF space queue index - * @pf: PF used for conversion - * @globalq: global queue index used to convert to PF space queue index - */ -static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq) -{ - return globalq - pf->hw.func_caps.common_cap.rxq_first_id; -} - -/** - * ice_vf_lan_overflow_event - handle LAN overflow event for a VF - * 
@pf: PF that the LAN overflow event happened on - * @event: structure holding the event information for the LAN overflow event - * - * Determine if the LAN overflow event was caused by a VF queue. If it was not - * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a - * reset on the offending VF. - */ -void -ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) -{ - u32 gldcb_rtctq, queue; - struct ice_vf *vf; - - gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq); - dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); - - /* event returns device global Rx queue number */ - queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >> - GLDCB_RTCTQ_RXQNUM_S; - - vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); - if (!vf) - return; - - ice_vc_reset_vf(vf); -} - /** * ice_vc_send_msg_to_vf - Send message to VF * @vf: pointer to the VF info @@ -3055,42 +523,18 @@ int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { - enum ice_status aq_ret; struct device *dev; struct ice_pf *pf; - - if (!vf) - return -EINVAL; + int aq_ret; pf = vf->pf; - if (ice_validate_vf_id(pf, vf->vf_id)) - return -EINVAL; - dev = ice_pf_to_dev(pf); - /* single place to detect unsuccessful return values */ - if (v_retval) { - vf->num_inval_msgs++; - dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id, - v_opcode, v_retval); - if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) { - dev_err(dev, "Number of invalid messages exceeded for VF %d\n", - vf->vf_id); - dev_err(dev, "Use PF Control I/F to enable the VF\n"); - set_bit(ICE_VF_STATE_DIS, vf->vf_states); - return -EIO; - } - } else { - vf->num_valid_msgs++; - /* reset the invalid counter, if a valid message is received. 
*/ - vf->num_inval_msgs = 0; - } - aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { - dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n", - vf->vf_id, ice_stat_str(aq_ret), + dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n", + vf->vf_id, aq_ret, ice_aq_str(pf->hw.mailboxq.sq_last_status)); return -EIO; } @@ -3143,6 +587,53 @@ static u16 ice_vc_get_max_frame_size(struct ice_vf *vf) return max_frame_size; } +/** + * ice_vc_get_vlan_caps + * @hw: pointer to the hw + * @vf: pointer to the VF info + * @vsi: pointer to the VSI + * @driver_caps: current driver caps + * + * Return 0 if there is no VLAN caps supported, or VLAN caps value + */ +static u32 +ice_vc_get_vlan_caps(struct ice_hw *hw, struct ice_vf *vf, struct ice_vsi *vsi, + u32 driver_caps) +{ + if (ice_is_eswitch_mode_switchdev(vf->pf)) + /* In switchdev setting VLAN from VF isn't supported */ + return 0; + + if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) { + /* VLAN offloads based on current device configuration */ + return VIRTCHNL_VF_OFFLOAD_VLAN_V2; + } else if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) { + /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for + * these two conditions, which amounts to guest VLAN filtering + * and offloads being based on the inner VLAN or the + * inner/single VLAN respectively and don't allow VF to + * negotiate VIRTCHNL_VF_OFFLOAD in any other cases + */ + if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) { + return VIRTCHNL_VF_OFFLOAD_VLAN; + } else if (!ice_is_dvm_ena(hw) && + !ice_vf_is_port_vlan_ena(vf)) { + /* configure backward compatible support for VFs that + * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is + * configured in SVM, and no port VLAN is configured + */ + ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi); + return VIRTCHNL_VF_OFFLOAD_VLAN; + } else if (ice_is_dvm_ena(hw)) { + /* configure software offloaded VLAN support when DVM + * is enabled, but no port VLAN is enabled + */ + ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi); + } + } + return 0; +} + /** * ice_vc_get_vf_res_msg * @vf: pointer to the VF info @@ -3154,12 +645,13 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) { enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vf_resource *vfres = NULL; + struct ice_hw *hw = &vf->pf->hw; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int len = 0; int ret; - if (ice_check_vf_init(pf, vf)) { + if (ice_check_vf_init(vf)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; } @@ -3186,33 +678,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) goto err; } - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) { - /* VLAN offloads based on current device configuration */ - vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN_V2; - } else if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) { - /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for - * these two conditions, which amounts to guest VLAN filtering - * and offloads being based on the inner VLAN or the - * inner/single VLAN respectively and don't allow VF to - * negotiate VIRTCHNL_VF_OFFLOAD in any other cases - */ - if (ice_is_dvm_ena(&pf->hw) && ice_vf_is_port_vlan_ena(vf)) { - vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; - } else if (!ice_is_dvm_ena(&pf->hw) && - !ice_vf_is_port_vlan_ena(vf)) { - vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; - /* configure backward compatible support for VFs that - * only support 
VIRTCHNL_VF_OFFLOAD_VLAN, the PF is - * configured in SVM, and no port VLAN is configured - */ - ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi); - } else if (ice_is_dvm_ena(&pf->hw)) { - /* configure software offloaded VLAN support when DVM - * is enabled, but no port VLAN is enabled - */ - ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi); - } - } + vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi, + vf->driver_caps); if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; @@ -3229,6 +696,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FSUB_PF) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FSUB_PF; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; @@ -3256,9 +726,11 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF; #ifdef __TC_MQPRIO_MODE_MAX - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ && !ice_is_eswitch_mode_switchdev(pf)) + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ && + !ice_is_eswitch_mode_switchdev(pf)) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ; - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2 && !ice_is_eswitch_mode_switchdev(pf)) + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2 && + !ice_is_eswitch_mode_switchdev(pf)) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ_V2; #endif /* __TC_MQPRIO_MODE_MAX */ @@ -3270,20 +742,26 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) /* Negotiate DCF capability. */ if (vf->driver_caps & VIRTCHNL_VF_CAP_DCF) { - if (!ice_is_vf_dcf(vf)) { + if (!ice_is_dcf_enabled(pf)) { if (!ice_check_dcf_allowed(vf)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; } - if (!ice_is_vf_dcf(vf)) - ice_vc_notify_dcf_vf_info(pf->dcf.vf, vf); pf->dcf.vf = vf; dev_info(ice_pf_to_dev(pf), "Grant request for DCF functionality to VF%d\n", - vf->vf_id); - if (!ice_is_tunnel_empty(&pf->hw)) { + ICE_DCF_VFID); + if (ice_is_acl_empty(hw)) { + ice_acl_destroy_tbl(hw); + hw->dcf_caps |= DCF_ACL_CAP; + } else { + dev_info(ice_pf_to_dev(pf), "Failed to grant ACL capability to VF%d as ACL rules already exist\n", + ICE_DCF_VFID); + hw->dcf_caps &= ~DCF_ACL_CAP; + } + if (!ice_is_tunnel_empty(hw)) { dev_info(ice_pf_to_dev(pf), "Failed to grant UDP tunnel capability to VF%d as UDP tunnel rules already exist\n", - vf->vf_id); - pf->hw.dcf_caps &= ~DCF_UDP_TUNNEL_CAP; + ICE_DCF_VFID); + hw->dcf_caps &= ~DCF_UDP_TUNNEL_CAP; } } @@ -3300,16 +778,24 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) ice_rm_all_dcf_sw_rules(pf); ice_clear_dcf_acl_cfg(pf); ice_clear_dcf_udp_tunnel_cfg(pf); - pf->hw.dcf_caps &= ~(DCF_ACL_CAP | DCF_UDP_TUNNEL_CAP); + hw->dcf_caps &= ~(DCF_ACL_CAP | DCF_UDP_TUNNEL_CAP); ice_dcf_set_state(pf, ICE_DCF_STATE_OFF); pf->dcf.vf = NULL; - ice_reset_vf(vf, false); + ice_reset_vf(vf, 0); } + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS; + + if (vf->driver_caps & VIRTCHNL_VF_CAP_RDMA && + vf->vf_ops->cfg_rdma_irq_map && vf->vf_ops->clear_rdma_irq_map && + ice_chk_rdma_cap(pf) && ice_is_rdma_aux_loaded(pf)) + vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_RDMA; + vfres->num_vsis = 1; /* Tx and Rx queue are equal for VF */ vfres->num_queue_pairs = vsi->num_txq; - vfres->max_vectors = pf->num_msix_per_vf; 
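The new ice_vc_get_vlan_caps() helper above pulls the VLAN negotiation out of ice_vc_get_vf_res_msg() into one place. The following is a minimal userspace sketch of that decision tree; the flag values and function names here are illustrative stand-ins, not the virtchnl ABI:

#include <stdbool.h>
#include <stdio.h>

/* illustrative flag values, not the real VIRTCHNL_VF_OFFLOAD_* bits */
#define OFFLOAD_VLAN	0x0001u
#define OFFLOAD_VLAN_V2	0x0002u

/*
 * Mirror of the decision tree in ice_vc_get_vlan_caps(): VLAN_V2 wins when
 * the VF negotiated it; the legacy VLAN offload is only granted when the
 * DVM and port-VLAN state line up (DVM with a port VLAN, or SVM without
 * one); anything else gets no VLAN capability.
 */
static unsigned int vlan_caps(bool switchdev, unsigned int drv_caps,
			      bool dvm_ena, bool port_vlan_ena)
{
	if (switchdev)
		return 0;	/* setting VLAN from the VF is not supported */
	if (drv_caps & OFFLOAD_VLAN_V2)
		return OFFLOAD_VLAN_V2;
	if (drv_caps & OFFLOAD_VLAN) {
		if (dvm_ena && port_vlan_ena)
			return OFFLOAD_VLAN;
		if (!dvm_ena && !port_vlan_ena)
			return OFFLOAD_VLAN;	/* legacy SVM compatibility */
	}
	return 0;
}

int main(void)
{
	printf("svm, no port vlan -> 0x%x\n",
	       vlan_caps(false, OFFLOAD_VLAN, false, false));
	printf("dvm, no port vlan -> 0x%x\n",
	       vlan_caps(false, OFFLOAD_VLAN, true, false));
	return 0;
}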
+ vfres->max_vectors = pf->vfs.num_msix_per; vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE; vfres->rss_lut_size = vsi->rss_table_size; vfres->max_mtu = ice_vc_get_max_frame_size(vf); @@ -3348,25 +834,7 @@ err: static void ice_vc_reset_vf_msg(struct ice_vf *vf) { if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) - ice_reset_vf(vf, false); -} - -/** - * ice_find_vsi_from_id - * @pf: the PF structure to search for the VSI - * @id: ID of the VSI it is searching for - * - * searches for the VSI with the given ID - */ -static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) -{ - int i; - - ice_for_each_vsi(pf, i) - if (pf->vsi[i] && pf->vsi[i]->vsi_num == id) - return pf->vsi[i]; - - return NULL; + ice_reset_vf(vf, 0); } /** @@ -3381,9 +849,9 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; - vsi = ice_find_vsi_from_id(pf, vsi_id); + vsi = ice_find_vsi(pf, vsi_id); - return (vsi && (vsi->vf_id == vf->vf_id)); + return (vsi && vsi->vf == vf); } /** @@ -3396,7 +864,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) */ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) { - struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id); + struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id); /* allocated Tx and Rx queues should be always equal for VF VSI */ return (vsi && (qid < vsi->alloc_txq)); } @@ -3416,19 +884,17 @@ static bool ice_vc_isvalid_ring_len(u16 ring_len) !(ring_len % ICE_REQ_DESC_MULTIPLE)); } -static enum virtchnl_status_code ice_vc_rss_hash_update(struct ice_hw *hw, - struct ice_vsi *vsi, - u8 hash_type) +static enum virtchnl_status_code +ice_vc_rss_hash_update(struct ice_hw *hw, struct ice_vsi *vsi, u8 hash_type) { enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct ice_vsi_ctx *ctx; - enum ice_status status; + int status; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return VIRTCHNL_STATUS_ERR_NO_MEMORY; - /* clear previous hash_type */ ctx->info.q_opt_rss = vsi->info.q_opt_rss & ~(ICE_AQ_VSI_Q_OPT_RSS_HASH_M); @@ -3444,10 +910,8 @@ static enum virtchnl_status_code ice_vc_rss_hash_update(struct ice_hw *hw, status = ice_update_vsi(hw, vsi->idx, ctx, NULL); if (status) { - dev_err(ice_hw_to_dev(hw), - "update VSI for rss failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(ice_hw_to_dev(hw), "update VSI for RSS failed, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); v_ret = VIRTCHNL_STATUS_ERR_PARAM; } else { vsi->info.q_opt_rss = ctx->info.q_opt_rss; @@ -3458,36 +922,6 @@ static enum virtchnl_status_code ice_vc_rss_hash_update(struct ice_hw *hw, return v_ret; } -/** - * ice_pkg_name_to_type - translate DDP package name string to type - * @hw: pointer to the hardware - * - * This is a helper function to translate the DDP package name string - * to ice_pkg_type, in order to select the correct hash list. 
- */ -enum ice_pkg_type ice_pkg_name_to_type(struct ice_hw *hw) -{ - uint16_t i; - static const struct { - char name[ICE_PKG_NAME_SIZE]; - enum ice_pkg_type pkg_type; - } ice_pkg_type_list[] = { - {"ICE OS Default Package", ICE_PKG_TYPE_OS_DEFAULT}, - {"ICE COMMS Package", ICE_PKG_TYPE_COMMS}, - {"ICE Wireless Edge Package", ICE_PKG_TYPE_WIRELESS_EDGE}, - {"ICE GTP over GRE Package", ICE_PKG_TYPE_GTP_OVER_GRE}, - {"ICE Tencent GRE Package", ICE_PKG_TYPE_OS_DEFAULT}, - }; - - for (i = 0; i < ARRAY_SIZE(ice_pkg_type_list); i++) { - if (!strcmp(ice_pkg_type_list[i].name, - (const char *)hw->active_pkg_name)) - return ice_pkg_type_list[i].pkg_type; - } - - return ICE_PKG_TYPE_UNKNOWN; -}; - /** * ice_vc_validate_pattern * @vf: pointer to the VF info @@ -3506,10 +940,33 @@ ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto) bool is_udp = false; u16 ptype = -1; int i = 0; + int count; + s32 type; - while (i < proto->count && - proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) { - switch (proto->proto_hdr[i].type) { + /* pass the validate for raw pattern */ + if (proto->tunnel_level == 0 && proto->count == 0) + return true; + + if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS + + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK) + return false; + + if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) + count = proto->count - VIRTCHNL_MAX_NUM_PROTO_HDRS; + else + count = proto->count; + + while (i < count) { + if (proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) + type = proto->proto_hdr[i].type; + else if (i < VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK && + proto->proto_hdr_w_msk[i].type != + VIRTCHNL_PROTO_HDR_NONE) + type = proto->proto_hdr_w_msk[i].type; + else + break; + + switch (type) { case VIRTCHNL_PROTO_HDR_ETH: ptype = ICE_PTYPE_MAC_PAY; break; @@ -3517,10 +974,18 @@ ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto) ptype = ICE_PTYPE_IPV4_PAY; is_ipv4 = true; break; + case VIRTCHNL_PROTO_HDR_IPV4_FRAG: + ptype = ICE_PTYPE_IPV4FRAG_PAY; + is_ipv4 = true; + break; case VIRTCHNL_PROTO_HDR_IPV6: ptype = ICE_PTYPE_IPV6_PAY; is_ipv6 = true; break; + case VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG: + ptype = ICE_PTYPE_IPV6FRAG_PAY; + is_ipv6 = true; + break; case VIRTCHNL_PROTO_HDR_UDP: if (is_ipv4) ptype = ICE_PTYPE_IPV4_UDP_PAY; @@ -3594,6 +1059,12 @@ ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto) else if (is_ipv6 && is_l2tpv2) ptype = ICE_MAC_IPV6_PPPOL2TPV2; goto out; + case VIRTCHNL_PROTO_HDR_GRE: + if (is_ipv4) + ptype = ICE_MAC_IPV4_TUN_PAY; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_TUN_PAY; + goto out; default: break; } @@ -3627,6 +1098,7 @@ static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, bool outer_ipv4 = false; bool outer_ipv6 = false; bool inner_hdr = false; + bool has_gre = false; u32 *addl_hdrs = &hash_cfg->addl_hdrs; u64 *hash_flds = &hash_cfg->hash_flds; @@ -3655,6 +1127,21 @@ static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, hdr_found = hdr_map.ice_hdr; } + /* Find matched ice hash fields according to + * virtchnl hash fields. 
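The reworked ice_vc_validate_pattern() above now walks two parallel header arrays, preferring the plain protocol header and falling back to the masked one. A small self-contained sketch of that selection, with array sizes and type values chosen only for illustration:

#include <stdio.h>

#define HDR_NONE	0
#define MAX_HDRS	8	/* stand-in for VIRTCHNL_MAX_NUM_PROTO_HDRS */
#define MAX_HDRS_W_MSK	4	/* stand-in for the masked-header array size */

/*
 * Pick the header type for slot i the way the reworked validation loop
 * does: use the plain header if present, otherwise the masked header,
 * otherwise stop (HDR_NONE).
 */
static int hdr_type_at(const int *hdrs, const int *hdrs_w_msk, int i)
{
	if (hdrs[i] != HDR_NONE)
		return hdrs[i];
	if (i < MAX_HDRS_W_MSK && hdrs_w_msk[i] != HDR_NONE)
		return hdrs_w_msk[i];
	return HDR_NONE;
}

int main(void)
{
	int plain[MAX_HDRS]        = { 1, 4, 0, 0, 0, 0, 0, 0 }; /* e.g. ETH, IPv4 */
	int masked[MAX_HDRS_W_MSK] = { 0, 0, 17, 0 };		  /* e.g. UDP w/ mask */
	int i, t;

	for (i = 0; i < MAX_HDRS && (t = hdr_type_at(plain, masked, i)); i++)
		printf("slot %d: header type %d\n", i, t);
	return 0;
}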
+ */ + for (j = 0; j < hf_list_len; j++) { + struct ice_vc_hash_field_match_type hf_map = + hf_list[j]; + + if (proto_hdr->type == hf_map.vc_hdr && + proto_hdr->field_selector == + hf_map.vc_hash_field) { + *hash_flds |= hf_map.ice_hash_field; + break; + } + } + if (!hdr_found) return false; @@ -3663,15 +1150,18 @@ static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, else if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6 && !inner_hdr) outer_ipv6 = true; - /* for GTPU and L2TPv2, take inner header as input set if no + /* for GRE and L2TPv2, take inner header as input set if no * any field is selected from outer headers. + * for GTPU, take inner header and GTPU teid as input set. */ - else if ((proto_hdr->type == VIRTCHNL_PROTO_HDR_L2TPV2 || - proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_IP || + else if ((proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_IP || proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH || proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN || - proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP) && - *hash_flds == 0) { + proto_hdr->type == + VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP) || + ((proto_hdr->type == VIRTCHNL_PROTO_HDR_L2TPV2 || + proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE) && + *hash_flds == 0)) { /* set inner_hdr flag, and clean up outer header */ inner_hdr = true; @@ -3687,23 +1177,38 @@ static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6; else hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS; + + if (has_gre && outer_ipv4) + hash_cfg->hdr_type = + ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE; + if (has_gre && outer_ipv6) + hash_cfg->hdr_type = + ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE; + + if (proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE) + has_gre = true; } *addl_hdrs |= hdr_found; - /* Find matched ice hash fields according to - * virtchnl hash fields. 
- */ - for (j = 0; j < hf_list_len; j++) { - struct ice_vc_hash_field_match_type hf_map = - hf_list[j]; - - if (proto_hdr->type == hf_map.vc_hdr && - proto_hdr->field_selector == - hf_map.vc_hash_field) { - *hash_flds |= hf_map.ice_hash_field; - break; - } + /* refine hash hdrs and fields for IP fragment */ + if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr, + VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID) && + proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4_FRAG) { + *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG; + *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER); + *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID); + VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr, + VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID); + } + if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr, + VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID) && + proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG) { + *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG; + *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER); + *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID); + VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr, + VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID); } } @@ -3813,16 +1318,16 @@ static int ice_hash_moveout(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) { struct device *dev = ice_pf_to_dev(vf->pf); - enum ice_status status = 0; struct ice_hw *hw = &vf->pf->hw; + int status; if (!is_hash_cfg_valid(cfg)) return -ENOENT; status = ice_rem_rss_cfg(hw, vf->lan_vsi_idx, cfg); - if (status && status != ICE_ERR_DOES_NOT_EXIST) { - dev_err(dev, "ice_rem_rss_cfg failed for VSI:%d, error:%s\n", - vf->lan_vsi_num, ice_stat_str(status)); + if (status && status != -ENOENT) { + dev_err(dev, "ice_rem_rss_cfg failed for VSI:%d, error:%d\n", + vf->lan_vsi_num, status); return -EBUSY; } @@ -3840,16 +1345,16 @@ static int ice_hash_moveback(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) { struct device *dev = ice_pf_to_dev(vf->pf); - enum ice_status status = 0; struct ice_hw *hw = &vf->pf->hw; + int status; if (!is_hash_cfg_valid(cfg)) return -ENOENT; status = ice_add_rss_cfg(hw, vf->lan_vsi_idx, cfg); if (status) { - dev_err(dev, "ice_add_rss_cfg failed for VSI:%d, error:%s\n", - vf->lan_vsi_num, ice_stat_str(status)); + dev_err(dev, "ice_add_rss_cfg failed for VSI:%d, error:%d\n", + vf->lan_vsi_num, status); return -EBUSY; } @@ -4452,29 +1957,27 @@ ice_rem_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) * and also post process the hash context base on the rollback mechanism * which handle some rules conflict by ice_add_rss_cfg_wrap. */ -static enum ice_status +static int ice_rem_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) { struct device *dev = ice_pf_to_dev(vf->pf); - enum ice_status status = 0; struct ice_hw *hw = &vf->pf->hw; + int status; status = ice_rem_rss_cfg(hw, vf->lan_vsi_idx, cfg); - /* We just ignore ICE_ERR_DOES_NOT_EXIST, because - * if two configurations share the same profile remove - * one of them actually removes both, since the + /* We just ignore -ENOENT, because if two configurations share the same + * profile remove one of them actually removes both, since the * profile is deleted. 
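ice_hash_moveout() and ice_rem_rss_cfg_wrap() above both tolerate a missing entry on removal, since deleting one of two configurations that share a profile already removed both. A minimal userspace model of that idempotent-remove pattern; remove_cfg() is a stand-in for ice_rem_rss_cfg():

#include <errno.h>
#include <stdio.h>

/* pretend removal: succeeds if the entry exists, -ENOENT otherwise */
static int remove_cfg(int present)
{
	return present ? 0 : -ENOENT;
}

/* treat "already gone" as success, fail loudly on anything else */
static int remove_cfg_tolerant(int present)
{
	int status = remove_cfg(present);

	if (status && status != -ENOENT) {
		fprintf(stderr, "remove failed: %d\n", status);
		return status;
	}
	return 0;
}

int main(void)
{
	printf("present: %d\n", remove_cfg_tolerant(1));
	printf("absent:  %d\n", remove_cfg_tolerant(0));
	return 0;
}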
*/ - if (status && status != ICE_ERR_DOES_NOT_EXIST) { - dev_err(dev, "ice_rem_rss_cfg failed for VSI:%d, error:%s\n", - vf->lan_vsi_num, ice_stat_str(status)); - goto error; + if (status && status != -ENOENT) { + dev_err(dev, "ice_rem_rss_cfg failed for VSI:%d, error:%d\n", + vf->lan_vsi_num, status); + return status; } ice_rem_rss_cfg_post(vf, cfg); -error: - return status; + return 0; } /** @@ -4486,27 +1989,213 @@ error: * also use a rollback mechanism to handle some rules conflict due to TCAM * write sequence from top to down. */ -static enum ice_status +static int ice_add_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) { struct device *dev = ice_pf_to_dev(vf->pf); - enum ice_status status = 0; struct ice_hw *hw = &vf->pf->hw; + int status; if (ice_add_rss_cfg_pre(vf, cfg)) - return ICE_ERR_PARAM; + return -EINVAL; status = ice_add_rss_cfg(hw, vf->lan_vsi_idx, cfg); if (status) { - dev_err(dev, "ice_add_rss_cfg failed for VSI:%d, error:%s\n", - vf->lan_vsi_num, ice_stat_str(status)); - goto error; + dev_err(dev, "ice_add_rss_cfg failed for VSI:%d, error:%d\n", + vf->lan_vsi_num, status); + return status; } if (ice_add_rss_cfg_post(vf, cfg)) - status = ICE_ERR_PARAM; + status = -EINVAL; -error: + return status; +} + +/** + * ice_parse_raw_rss_pattern - Parse raw pattern spec and mask for RSS + * @vf: pointer to the VF info + * @proto: pointer to the virtchnl protocol header + * @raw_cfg: pointer to the RSS raw pattern configuration + * + * Parser function to get spec and mask from virtchnl message, and parse + * them to get the corresponding profile and offset. The profile is used + * to add RSS configuration. + */ +static int +ice_parse_raw_rss_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto, + struct ice_rss_raw_cfg *raw_cfg) +{ + struct ice_parser_result pkt_parsed; + struct ice_hw *hw = &vf->pf->hw; + struct ice_parser_profile prof; + u16 pkt_len, udp_port = 0; + struct ice_parser *psr; + u8 *pkt_buf, *msk_buf; + int status = 0; + + pkt_len = proto->raw.pkt_len; + pkt_buf = kzalloc(pkt_len, GFP_KERNEL); + msk_buf = kzalloc(pkt_len, GFP_KERNEL); + if (!pkt_buf || !msk_buf) { + status = -ENOMEM; + goto free_alloc; + } + + memcpy(pkt_buf, proto->raw.spec, pkt_len); + memcpy(msk_buf, proto->raw.mask, pkt_len); + + status = ice_parser_create(hw, &psr); + if (status) + goto parser_destroy; + + if (ice_get_open_tunnel_port(hw, TNL_VXLAN, &udp_port)) + ice_parser_vxlan_tunnel_set(psr, udp_port, true); + + status = ice_parser_run(psr, pkt_buf, pkt_len, &pkt_parsed); + if (status) + goto parser_destroy; + + status = ice_parser_profile_init(&pkt_parsed, pkt_buf, msk_buf, + pkt_len, ICE_BLK_RSS, + true, &prof); + if (status) + goto parser_destroy; + + memcpy(&raw_cfg->prof, &prof, sizeof(prof)); + +parser_destroy: + ice_parser_destroy(psr); +free_alloc: + kfree(pkt_buf); + kfree(msk_buf); + return status; +} + +/** + * ice_add_raw_rss_cfg - add RSS configuration for raw pattern + * @vf: pointer to the VF info + * @cfg: pointer to the RSS raw pattern configuration + * + * This function adds the RSS configuration for raw pattern. + * Check if current profile is matched. If not, remove the old + * one and add the new profile to HW directly. Update the symmetric + * hash configuration as well. 
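The new ice_parse_raw_rss_pattern() above copies the raw spec and mask into scratch buffers and unwinds through a single cleanup path. A userspace sketch of that allocate/copy/cleanup shape, with parse() standing in for the ice parser calls (the real parser API is in ice_parser.c):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse(const unsigned char *spec, const unsigned char *mask, size_t len)
{
	return (len && spec && mask) ? 0 : -1;	/* placeholder for the parser */
}

static int parse_raw_pattern(const unsigned char *spec,
			     const unsigned char *mask, size_t len)
{
	unsigned char *pkt_buf, *msk_buf;
	int status;

	pkt_buf = calloc(1, len);
	msk_buf = calloc(1, len);
	if (!pkt_buf || !msk_buf) {
		status = -1;
		goto free_bufs;
	}

	memcpy(pkt_buf, spec, len);
	memcpy(msk_buf, mask, len);
	status = parse(pkt_buf, msk_buf, len);

free_bufs:
	free(pkt_buf);	/* free(NULL) is a no-op, so partial allocation is safe */
	free(msk_buf);
	return status;
}

int main(void)
{
	unsigned char spec[4] = { 0x45, 0, 0, 0x28 }, mask[4] = { 0xff, 0, 0, 0 };

	printf("parse status: %d\n", parse_raw_pattern(spec, mask, sizeof(spec)));
	return 0;
}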
+ */ +static int +ice_add_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg) +{ + struct ice_parser_profile *prof = &cfg->prof; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_rss_prof_info *rss_prof; + struct ice_hw *hw = &vf->pf->hw; + int i, ptg, status = 0; + u16 vsi_handle; + u64 id; + + vsi_handle = vf->lan_vsi_idx; + id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX); + + ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id]; + rss_prof = &vf->rss_prof_info[ptg]; + + /* check if ptg already has a profile */ + if (rss_prof->prof.fv_num) { + for (i = 0; i < ICE_MAX_FV_WORDS; i++) { + if (rss_prof->prof.fv[i].proto_id != + prof->fv[i].proto_id || + rss_prof->prof.fv[i].offset != + prof->fv[i].offset) + break; + } + + /* current profile is matched, check symmetric hash */ + if (i == ICE_MAX_FV_WORDS) { + if (rss_prof->symm != cfg->symm) + goto update_symm; + return status; + } + + /* current profile is not matched, remove it */ + status = + ice_rem_prof_id_flow(hw, ICE_BLK_RSS, + ice_get_hw_vsi_num(hw, vsi_handle), + id); + if (status) { + dev_err(dev, "remove RSS flow failed\n"); + return status; + } + + status = ice_rem_prof(hw, ICE_BLK_RSS, id); + if (status) { + dev_err(dev, "remove RSS profile failed\n"); + return status; + } + } + + /* add new profile */ + status = ice_flow_set_hw_prof(hw, vsi_handle, 0, prof, ICE_BLK_RSS); + if (status) { + dev_err(dev, "HW profile add failed\n"); + return status; + } + + memcpy(&rss_prof->prof, prof, sizeof(struct ice_parser_profile)); + +update_symm: + rss_prof->symm = cfg->symm; + ice_rss_update_raw_symm(hw, cfg, id); + return status; +} + +/** + * ice_rem_raw_rss_cfg - remove RSS configuration for raw pattern + * @vf: pointer to the VF info + * @cfg: pointer to the RSS raw pattern configuration + * + * This function removes the RSS configuration for raw pattern. + * Check if vsi group is already removed first. If not, remove the + * profile. + */ +static int +ice_rem_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg) +{ + struct ice_parser_profile *prof = &cfg->prof; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_hw *hw = &vf->pf->hw; + int ptg, status = 0; + u16 vsig, vsi; + u64 id; + + id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX); + + ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id]; + + memset(&vf->rss_prof_info[ptg], 0, + sizeof(struct ice_rss_prof_info)); + + /* check if vsig is already removed */ + vsi = ice_get_hw_vsi_num(hw, vf->lan_vsi_idx); + if (vsi >= ICE_MAX_VSI) { + status = -EINVAL; + goto err; + } + + vsig = hw->blk[ICE_BLK_RSS].xlt2.vsis[vsi].vsig; + if (vsig) { + status = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, id); + if (status) + goto err; + + status = ice_rem_prof(hw, ICE_BLK_RSS, id); + if (status) + goto err; + } + + return status; + +err: + dev_err(dev, "HW profile remove failed\n"); return status; } @@ -4527,6 +2216,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) struct device *dev = ice_pf_to_dev(vf->pf); struct ice_hw *hw = &vf->pf->hw; struct ice_vsi *vsi; + u8 hash_type; + bool symm; if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n", @@ -4568,36 +2259,53 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) } if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { - u8 hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR : ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + hash_type = add ? 
ICE_AQ_VSI_Q_OPT_RSS_XOR : + ICE_AQ_VSI_Q_OPT_RSS_TPLZ; v_ret = ice_vc_rss_hash_update(hw, vsi, hash_type); + goto error_param; + } + + hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ : + ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + v_ret = ice_vc_rss_hash_update(hw, vsi, hash_type); + if (v_ret) + goto error_param; + + symm = rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; + /* Configure RSS hash for raw pattern */ + if (rss_cfg->proto_hdrs.tunnel_level == 0 && + rss_cfg->proto_hdrs.count == 0) { + struct ice_rss_raw_cfg raw_cfg; + + if (ice_parse_raw_rss_pattern(vf, &rss_cfg->proto_hdrs, + &raw_cfg)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (add) { + raw_cfg.symm = symm; + if (ice_add_raw_rss_cfg(vf, &raw_cfg)) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } else { + if (ice_rem_raw_rss_cfg(vf, &raw_cfg)) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } } else { struct ice_rss_hash_cfg cfg; - u8 hash_type; cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE; cfg.hash_flds = ICE_HASH_INVALID; cfg.hdr_type = ICE_RSS_ANY_HEADERS; - hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ : - ICE_AQ_VSI_Q_OPT_RSS_TPLZ; - - v_ret = ice_vc_rss_hash_update(hw, vsi, hash_type); - if (v_ret) - goto error_param; - if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (add) { - if (rss_cfg->rss_algorithm == - VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) - cfg.symm = true; - else - cfg.symm = false; - + cfg.symm = symm; if (ice_add_rss_cfg_wrap(vf, &cfg)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; } else { @@ -4610,6 +2318,334 @@ error_param: return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0); } +/** + * ice_vc_get_qos_caps - Get current QoS caps from PF + * @vf: pointer to the VF info + * + * Get VF's QoS capabilities, such as TC number, arbiter and + * bandwidth from PF. + */ +static int ice_vc_get_qos_caps(struct ice_vf *vf) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_qos_cap_list *cap_list = NULL; + u8 tc_prio[ICE_MAX_TRAFFIC_CLASS] = {0}; + struct virtchnl_qos_cap_elem *cfg = NULL; + struct ice_vsi_ctx *vsi_ctx; + struct ice_pf *pf = vf->pf; + struct ice_port_info *pi; + struct ice_vsi *vsi; + u8 numtc, tc; + u16 len = 0; + int ret, i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + pi = pf->hw.port_info; + numtc = vsi->tc_cfg.numtc; + + vsi_ctx = ice_get_vsi_ctx(pi->hw, vf->lan_vsi_idx); + if (!vsi_ctx) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + len = sizeof(*cap_list) + sizeof(cap_list->cap[0]) * (numtc - 1); + cap_list = kzalloc(len, GFP_KERNEL); + if (!cap_list) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + len = 0; + goto err; + } + + cap_list->vsi_id = vsi->vsi_num; + cap_list->num_elem = numtc; + + /* Store the UP2TC configuration from DCB to a user priority bitmap + * of each TC. Each element of prio_of_tc represents one TC. Each + * bitmap indicates the user priorities belong to this TC. 
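ice_vc_get_qos_caps() below inverts the DCB UP-to-TC table into one user-priority bitmap per TC before filling the virtchnl reply. A runnable sketch of just that inversion, with array sizes standing in for ICE_MAX_USER_PRIORITY and ICE_MAX_TRAFFIC_CLASS:

#include <stdint.h>
#include <stdio.h>

#define MAX_USER_PRIO		8
#define MAX_TRAFFIC_CLASS	8

/* each output entry is a bitmap of the user priorities mapped to that TC */
static void build_tc_prio(const uint8_t *prio_table, uint8_t *tc_prio)
{
	int i;

	for (i = 0; i < MAX_USER_PRIO; i++)
		tc_prio[prio_table[i]] |= (uint8_t)(1u << i);
}

int main(void)
{
	/* priorities 0-3 -> TC0, 4-5 -> TC1, 6-7 -> TC2 */
	uint8_t prio_table[MAX_USER_PRIO] = { 0, 0, 0, 0, 1, 1, 2, 2 };
	uint8_t tc_prio[MAX_TRAFFIC_CLASS] = { 0 };
	int tc;

	build_tc_prio(prio_table, tc_prio);
	for (tc = 0; tc < 3; tc++)
		printf("TC%d priority bitmap: 0x%02x\n", tc, tc_prio[tc]);
	return 0;
}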
+ */ + for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { + tc = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[i]; + tc_prio[tc] |= BIT(i); + } + + for (i = 0; i < numtc; i++) { + cfg = &cap_list->cap[i]; + cfg->tc_num = i; + cfg->tc_prio = tc_prio[i]; + cfg->arbiter = pi->qos_cfg.local_dcbx_cfg.etscfg.tsatable[i]; + cfg->weight = VIRTCHNL_STRICT_WEIGHT; + cfg->type = VIRTCHNL_BW_SHAPER; + cfg->shaper.committed = vsi_ctx->sched.bw_t_info[i].cir_bw.bw; + cfg->shaper.peak = vsi_ctx->sched.bw_t_info[i].eir_bw.bw; + } + +err: + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_QOS_CAPS, v_ret, + (u8 *)cap_list, len); + kfree(cap_list); + return ret; +} + +/** + * ice_validate_vf_q_tc_map - Validate configurations for queue TC mapping + * @vf: pointer to the VF info + * @qtc: pointer to the queue tc mapping info structure + */ +static int +ice_validate_vf_q_tc_map(struct ice_vf *vf, + struct virtchnl_queue_tc_mapping *qtc) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + u16 offset = 0; + int i; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "VF-%d has invalid VSI pointer\n", + vf->vf_id); + return -EINVAL; + } + + if (qtc->num_queue_pairs > + min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { + dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", + vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); + return -EINVAL; + } + + if (qtc->num_tc > vsi->tc_cfg.numtc) { + dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of TCs: %d\n", + vf->vf_id, vsi->tc_cfg.numtc); + return -EINVAL; + } + + for (i = 0; i < qtc->num_tc; i++) + offset += qtc->tc[i].req.queue_count; + + if (offset != qtc->num_queue_pairs) { + dev_err(ice_pf_to_dev(pf), "VF-%d queues to be mapped do not equal to number of VF queue pairs\n", + vf->vf_id); + return -EINVAL; + } + + return 0; +} + +/** + * ice_vc_cfg_q_tc_map - Configure per queue TC mapping + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer which holds the command descriptor + * + * Configure VF queues TC mapping. 
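ice_validate_vf_q_tc_map() above enforces three constraints: the request may not exceed the allocated queue pairs, may not exceed the VSI's TC count, and the per-TC queue counts must add up to exactly num_queue_pairs. A standalone version of those checks, with a simplified request structure used only for illustration:

#include <stdio.h>

struct tc_map_req {
	unsigned int num_queue_pairs;
	unsigned int num_tc;
	unsigned int queue_count[8];	/* queues requested per TC */
};

static int validate_q_tc_map(const struct tc_map_req *req,
			     unsigned int alloc_q, unsigned int max_tc)
{
	unsigned int i, total = 0;

	if (req->num_queue_pairs > alloc_q || req->num_tc > max_tc)
		return -1;

	for (i = 0; i < req->num_tc; i++)
		total += req->queue_count[i];

	/* every requested queue pair must be claimed by exactly one TC */
	return total == req->num_queue_pairs ? 0 : -1;
}

int main(void)
{
	struct tc_map_req ok  = { .num_queue_pairs = 8, .num_tc = 2,
				  .queue_count = { 4, 4 } };
	struct tc_map_req bad = { .num_queue_pairs = 8, .num_tc = 2,
				  .queue_count = { 4, 2 } };

	printf("ok:  %d\n", validate_q_tc_map(&ok, 16, 4));
	printf("bad: %d\n", validate_q_tc_map(&bad, 16, 4));
	return 0;
}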
+ */ +static int ice_vc_cfg_q_tc_map(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_queue_tc_mapping *tc_map = NULL; + struct virtchnl_queue_tc_mapping *qtc = + (struct virtchnl_queue_tc_mapping *)msg; + u16 prio_bitmap[ICE_MAX_TRAFFIC_CLASS] = {0}; + u16 qmap = 0, pow = 0, offset = 0, len = 0; + struct ice_hw *hw = &vf->pf->hw; + struct ice_vsi_ctx *ctx = NULL; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + u8 netdev_tc = 0; + int i, ret; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (!ice_vc_isvalid_vsi_id(vf, qtc->vsi_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi || vsi->vsi_num != qtc->vsi_id) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (ice_validate_vf_q_tc_map(vf, qtc)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + goto err; + } + + len = sizeof(*tc_map) + sizeof(tc_map->tc[0]) * (qtc->num_tc - 1); + tc_map = kzalloc(len, GFP_KERNEL); + if (!tc_map) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + len = 0; + goto err; + } + + tc_map->vsi_id = qtc->vsi_id; + tc_map->num_tc = qtc->num_tc; + tc_map->num_queue_pairs = qtc->num_queue_pairs; + + /* Get the corresponding user priority bitmap for each TC */ + for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { + int uptc = + hw->port_info->qos_cfg.local_dcbx_cfg.etscfg.prio_table[i]; + + prio_bitmap[uptc] = BIT(i); + } + /* Count queues number per TC, Rx and Tx queues are identical */ + for (i = 0; i < qtc->num_tc; i++) { + vsi->tc_cfg.tc_info[i].qoffset = offset; + vsi->tc_cfg.tc_info[i].qcount_tx = qtc->tc[i].req.queue_count; + vsi->tc_cfg.tc_info[i].qcount_rx = qtc->tc[i].req.queue_count; + vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; + + pow = (u16)order_base_2(qtc->tc[i].req.queue_count); + qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & + ICE_AQ_VSI_TC_Q_OFFSET_M) | + ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & + ICE_AQ_VSI_TC_Q_NUM_M); + ctx->info.tc_mapping[i] = cpu_to_le16(qmap); + offset += qtc->tc[i].req.queue_count; + + /* Write response message */ + tc_map->tc[i].resp.prio_type = VIRTCHNL_USER_PRIO_TYPE_UP; + tc_map->tc[i].resp.valid_prio_bitmap = prio_bitmap[i]; + } + + ice_vsi_cfg_dcb_rings(vsi); + + /* Update Rx queue mapping */ + ctx->info.mapping_flags = vsi->info.mapping_flags; + memcpy(&ctx->info.q_mapping, &vsi->info.q_mapping, + sizeof(vsi->info.q_mapping)); + ctx->info.valid_sections |= + cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); + + if (ice_update_vsi(hw, vsi->idx, ctx, NULL)) { + dev_err(ice_pf_to_dev(pf), "Update VSI failed\n"); + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + } else { + memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, + sizeof(vsi->info.tc_mapping)); + } + +err: + /* send the response to the VF */ + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP, + v_ret, (u8 *)tc_map, len); + kfree(ctx); + kfree(tc_map); + return ret; +} + +/** + * ice_vf_cfg_qs_bw - Configure per queue bandwidth + * @vf: pointer to the VF info + * @num_queues: number of queues to be configured + * + * Configure per queue bandwidth. 
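The tc_mapping words built in ice_vc_cfg_q_tc_map() above pack each TC's starting queue offset together with the region size expressed as a power of two (order_base_2 of the queue count). A self-contained sketch of that encoding; the shift and mask values here are illustrative, the real ICE_AQ_VSI_TC_Q_* definitions live in ice_adminq_cmd.h:

#include <stdint.h>
#include <stdio.h>

#define TC_Q_OFFSET_S	0
#define TC_Q_OFFSET_M	(0x7ffu << TC_Q_OFFSET_S)
#define TC_Q_NUM_S	11
#define TC_Q_NUM_M	(0xfu << TC_Q_NUM_S)

/* smallest order such that 2^order >= n, like the kernel's order_base_2() */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

static uint16_t encode_tc_qmap(uint16_t offset, uint16_t queue_count)
{
	uint16_t pow = (uint16_t)order_base_2(queue_count);

	return (uint16_t)(((offset << TC_Q_OFFSET_S) & TC_Q_OFFSET_M) |
			  ((pow << TC_Q_NUM_S) & TC_Q_NUM_M));
}

int main(void)
{
	/* TC0 owns queues 0-3, TC1 owns queues 4-7 */
	printf("TC0 qmap: 0x%04x\n", encode_tc_qmap(0, 4));
	printf("TC1 qmap: 0x%04x\n", encode_tc_qmap(4, 4));
	return 0;
}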
+ */ +static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues) +{ + struct ice_hw *hw = &vf->pf->hw; + struct ice_vsi *vsi; + u32 p_rate; + int ret; + u16 i; + u8 tc; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return VIRTCHNL_STATUS_ERR_PARAM; + + for (i = 0; i < num_queues; i++) { + p_rate = vf->qs_bw[i].peak; + tc = vf->qs_bw[i].tc; + if (p_rate) { + ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc, + vf->qs_bw[i].queue_id, + ICE_MAX_BW, p_rate); + } else { + ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc, + vf->qs_bw[i].queue_id, + ICE_MAX_BW); + } + if (ret) + return ret; + } + + return VIRTCHNL_STATUS_SUCCESS; +} + +/** + * ice_vf_cfg_q_quanta_profile + * @vf: pointer to the VF info + * @quanta_prof_idx: pointer to the quanta profile index + * @quanta_size: quanta size to be set + * + * This function chooses available quanta profile and configures the register. + * The quanta profile is evenly divided by the number of device ports, and then + * available to the specific PF and VFs. The first profile for each PF is a + * reserved default profile. Only quanta size of the rest unused profile can be + * modified. + */ +static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size, + u16 *quanta_prof_idx) +{ + const u16 n_desc = calc_quanta_desc(quanta_size); + struct ice_hw *hw = &vf->pf->hw; + const u16 n_cmd = 2 * n_desc; + struct ice_pf *pf = vf->pf; + u16 per_pf, begin_id; + u8 n_used; + u32 reg; + + per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs; + begin_id = hw->logical_pf_id * per_pf; + n_used = pf->n_quanta_prof_used; + + if (quanta_size == ICE_DFLT_QUANTA) { + *quanta_prof_idx = begin_id; + } else { + if (n_used < per_pf) { + *quanta_prof_idx = begin_id + 1 + n_used; + pf->n_quanta_prof_used++; + } else { + return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + } + } + + reg = rd32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx)); + reg &= ~GLCOMM_QUANTA_PROF_QUANTA_SIZE_M; + reg |= quanta_size << GLCOMM_QUANTA_PROF_QUANTA_SIZE_S; + reg &= ~GLCOMM_QUANTA_PROF_MAX_CMD_M; + reg |= n_cmd << GLCOMM_QUANTA_PROF_MAX_CMD_S; + reg &= ~GLCOMM_QUANTA_PROF_MAX_DESC_M; + reg |= n_desc << GLCOMM_QUANTA_PROF_MAX_DESC_S; + wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg); + + return VIRTCHNL_STATUS_SUCCESS; +} + /** * ice_vc_config_rss_key * @vf: pointer to the VF info @@ -4703,128 +2739,6 @@ error_param: NULL, 0); } -/** - * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset - * @vf: The VF being resseting - * - * The max poll time is about ~800ms, which is about the maximum time it takes - * for a VF to be reset and/or a VF driver to be removed. - */ -static void ice_wait_on_vf_reset(struct ice_vf *vf) -{ - int i; - - for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) { - if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) - break; - msleep(ICE_MAX_VF_RESET_SLEEP_MS); - } -} - -/** - * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried - * @vf: VF to check if it's ready to be configured/queried - * - * The purpose of this function is to make sure the VF is not in reset, not - * disabled, and initialized so it can be configured and/or queried by a host - * administrator. 
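ice_vf_cfg_q_quanta_profile() above picks a quanta profile slot and then packs quanta size, max commands (twice the descriptor count) and max descriptors into one register word. A sketch of that read-modify-write; the field layout and the calc_quanta_desc() formula below are stand-ins, the real definitions are in ice_hw_autogen.h and the driver headers:

#include <stdint.h>
#include <stdio.h>

#define QUANTA_SIZE_S	0
#define QUANTA_SIZE_M	(0x3fffu << QUANTA_SIZE_S)
#define MAX_CMD_S	16
#define MAX_CMD_M	(0xffu << MAX_CMD_S)
#define MAX_DESC_S	24
#define MAX_DESC_M	(0x3fu << MAX_DESC_S)

/* placeholder descriptor-count formula, not the driver's */
static uint16_t calc_quanta_desc(uint16_t quanta_size)
{
	return (uint16_t)(quanta_size / 64);
}

static uint32_t pack_quanta_prof(uint32_t reg, uint16_t quanta_size)
{
	uint16_t n_desc = calc_quanta_desc(quanta_size);
	uint16_t n_cmd = (uint16_t)(2 * n_desc);

	reg &= ~QUANTA_SIZE_M;
	reg |= ((uint32_t)quanta_size << QUANTA_SIZE_S) & QUANTA_SIZE_M;
	reg &= ~MAX_CMD_M;
	reg |= ((uint32_t)n_cmd << MAX_CMD_S) & MAX_CMD_M;
	reg &= ~MAX_DESC_M;
	reg |= ((uint32_t)n_desc << MAX_DESC_S) & MAX_DESC_M;
	return reg;
}

int main(void)
{
	printf("reg = 0x%08x\n", (unsigned int)pack_quanta_prof(0, 1024));
	return 0;
}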
- */ -int ice_check_vf_ready_for_cfg(struct ice_vf *vf) -{ - struct ice_pf *pf; - - ice_wait_on_vf_reset(vf); - - if (ice_is_vf_disabled(vf)) - return -EINVAL; - - pf = vf->pf; - if (ice_check_vf_init(pf, vf)) - return -EBUSY; - - return 0; -} - -/** - * ice_set_vf_spoofchk - * @netdev: network interface device structure - * @vf_id: VF identifier - * @ena: flag to enable or disable feature - * - * Enable or disable VF spoof checking - */ -int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) -{ - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; - struct ice_vsi *vf_vsi; - struct device *dev; - struct ice_vf *vf; - int ret; - - dev = ice_pf_to_dev(pf); - if (ice_validate_vf_id(pf, vf_id)) - return -EINVAL; - - vf = &pf->vf[vf_id]; - ret = ice_check_vf_ready_for_cfg(vf); - if (ret) - return ret; - - vf_vsi = ice_get_vf_vsi(vf); - if (!vf_vsi) { - netdev_err(netdev, "VSI %d for VF %d is null\n", - vf->lan_vsi_idx, vf->vf_id); - return -EINVAL; - } - - if (vf_vsi->type != ICE_VSI_VF) { - netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n", - vf_vsi->type, vf_vsi->vsi_num, vf->vf_id); - return -ENODEV; - } - - if (ena == vf->spoofchk) { - dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF"); - return 0; - } - - if (ena) - ret = ice_vsi_ena_spoofchk(vf_vsi); - else - ret = ice_vsi_dis_spoofchk(vf_vsi); - if (ret) - dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d\n error %d\n", - ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret); - else - vf->spoofchk = ena; - - return ret; -} - -/** - * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode - * @pf: PF structure for accessing VF(s) - * - * Return false if no VF(s) are in unicast and/or multicast promiscuous mode, - * else return true - */ -bool ice_is_any_vf_in_promisc(struct ice_pf *pf) -{ - int vf_idx; - - ice_for_each_vf(pf, vf_idx) { - struct ice_vf *vf = &pf->vf[vf_idx]; - - /* found a VF that has promiscuous mode configured */ - if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || - test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) - return true; - } - - return false; -} - /** * ice_vc_cfg_promiscuous_mode_msg * @vf: pointer to the VF info @@ -4842,6 +2756,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) int mcast_err = 0, ucast_err = 0; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + u8 mcast_m, ucast_m; struct device *dev; int ret = 0; @@ -4862,7 +2777,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) } dev = ice_pf_to_dev(pf); - if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + if (!ice_is_vf_trusted(vf)) { dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n", vf->vf_id); /* Leave v_ret alone, lie to the VF on purpose. 
*/ @@ -4888,39 +2803,30 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) { - bool set_dflt_vsi = alluni || allmulti; + ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m); - if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(vsi->vsw)) - /* only attempt to set the default forwarding VSI if - * it's not currently set - */ - ret = ice_set_dflt_vsi(vsi->vsw, vsi); - else if (!set_dflt_vsi && - ice_is_vsi_dflt_vsi(vsi->vsw, vsi)) - /* only attempt to free the default forwarding VSI if we - * are the owner - */ - ret = ice_clear_dflt_vsi(vsi->vsw); + if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) { + if (alluni) + ret = ice_set_dflt_vsi(vsi); + else + ret = ice_clear_dflt_vsi(vsi); + + /* in this case we're turning on/off only + * allmulticast + */ + if (allmulti) + mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m); + else + mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m); if (ret) { - dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n", - set_dflt_vsi ? "en" : "dis", vf->vf_id, ret); + ice_dev_err_errno(dev, ret, + "Turning on/off promiscuous mode for VF %d failed", + vf->vf_id); v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; goto error_param; } } else { - u8 mcast_m, ucast_m; - - if (ice_vf_is_port_vlan_ena(vf) || - ice_vsi_has_non_zero_vlans(vsi)) { - mcast_m = ICE_MCAST_VLAN_PROMISC_BITS; - ucast_m = ICE_UCAST_VLAN_PROMISC_BITS; - } else { - mcast_m = ICE_MCAST_PROMISC_BITS; - ucast_m = ICE_UCAST_PROMISC_BITS; - } - if (alluni) ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m); else @@ -4937,18 +2843,31 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", vf->vf_id); - else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) + else if (!allmulti && + test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, + vf->vf_states)) dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", vf->vf_id); + } else { + ice_dev_err_errno(dev, mcast_err, + "Error while modifying multicast promiscuous mode for VF %u", + vf->vf_id); } if (!ucast_err) { - if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) + if (alluni && + !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", vf->vf_id); - else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) + else if (!alluni && + test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, + vf->vf_states)) dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", vf->vf_id); + } else { + ice_dev_err_errno(dev, ucast_err, + "Error while modifying unicast promiscuous mode for VF %u", + vf->vf_id); } error_param: @@ -4997,67 +2916,6 @@ error_param: (u8 *)&stats, sizeof(stats)); } -/** - * ice_vf_get_tc_based_qid - get the updated QID based on offset - * @qid: queue ID - * @offset : TC specific queue offset - * - * This function returns updated queueID based on offset. This is - * meant to be used only with VF ADQ. Queue ID will always be - * 0-based from the specified offset - */ -static u16 ice_vf_get_tc_based_qid(u16 qid, u16 offset) -{ - return (qid >= offset) ? 
(qid - offset) : qid; -} - -/** - * ice_vf_q_id_get_vsi_q_id - * @vf: pointer to the VF info - * @vf_q_id: VF relative queue ID - * @t_tc: traffic class for indexing the VSIs - * @vqs: the VFs virtual queue selection - * @vsi_p: pointer to VSI pointer, which changes based on TC for ADQ - * @vsi_id: VSI ID specific to desired queue ID - * @q_id: queue ID of the VSI - * - * provides ADQ queue enablement support by mapping the VF queue ID and TC to - * VSI ID and queue ID. call while iterating through VF queue IDs, VF VSIs and - * TCs. - */ -static void ice_vf_q_id_get_vsi_q_id(struct ice_vf *vf, u16 vf_q_id, u16 *t_tc, - struct virtchnl_queue_select *vqs, - struct ice_vsi **vsi_p, u16 *vsi_id, - u16 *q_id) -{ - struct ice_vsi *vsi = *vsi_p; - u32 max_chnl_tc; - u16 tc = *t_tc; - - max_chnl_tc = ice_vc_get_max_chnl_tc_allowed(vf); - - /* Update the VSI and TC based on per TC queue region and offset */ - if (tc + 1U < max_chnl_tc && vf_q_id == vf->ch[tc + 1].offset && - tc < vf->num_tc && ice_is_vf_adq_ena(vf)) { - vsi = vf->pf->vsi[vf->ch[tc + 1].vsi_idx]; - tc++; - } - - /* Update vsi_id and queue_id based on TC if TC is VF ADQ TC, then - * use VF ADQ VSI otherwise main VF VSI - */ - if (tc >= ICE_VF_CHNL_START_TC && ice_is_vf_adq_ena(vf)) { - *vsi_id = vsi->vsi_num; - *q_id = ice_vf_get_tc_based_qid(vf_q_id, vf->ch[tc].offset); - } else { - *vsi_id = vqs->vsi_id; - *q_id = vf_q_id; - } - - *vsi_p = vsi; - *t_tc = tc; -} - /** * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL * @vqs: virtchnl_queue_select structure containing bitmaps to validate @@ -5127,10 +2985,13 @@ static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) * @q_id: VSI relative (0-based) queue ID * @vf_q_id: VF relative (0-based) queue ID * - * Attempt to enable the Rx queue passed in. If the Rx queue was successfully enabled then set - * q_id bit in the enabled queues bitmap and return success. Otherwise return error. + * Attempt to enable the Rx queue passed in. If the Rx queue was successfully + * enabled then set q_id bit in the enabled queues bitmap and return success. + * Otherwise return error. */ -static int ice_vf_vsi_ena_single_rxq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id, u16 vf_q_id) +static int +ice_vf_vsi_ena_single_rxq(struct ice_vf *vf, struct ice_vsi *vsi, + u16 q_id, u16 vf_q_id) { int err; @@ -5157,11 +3018,14 @@ static int ice_vf_vsi_ena_single_rxq(struct ice_vf *vf, struct ice_vsi *vsi, u16 * @q_id: VSI relative (0-based) queue ID * @vf_q_id: VF relative (0-based) queue ID * - * Enable the Tx queue's interrupt then set the q_id bit in the enabled queues bitmap. Note that the - * Tx queue(s) should have already been configurated/enabled in VIRTCHNL_OP_CONFIG_QUEUES so this - * function only enables the interrupt associated with the q_id. + * Enable the Tx queue's interrupt then set the q_id bit in the enabled queues + * bitmap. Note that the Tx queue(s) should have already been + * configurated/enabled in VIRTCHNL_OP_CONFIG_QUEUES so this function only + * enables the interrupt associated with the q_id. 
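ice_vf_vsi_ena_single_rxq() and ice_vf_vsi_ena_single_txq() below keep a per-VF bitmap of enabled queues: an already-enabled queue is skipped, and the bit is only set once the hardware enable succeeds. A minimal userspace model of that bookkeeping, with enable_hw_queue() standing in for the actual ring setup:

#include <stdio.h>

#define MAX_QS 64

static int enable_hw_queue(unsigned int q_id)
{
	return q_id < MAX_QS ? 0 : -1;	/* pretend hardware enable */
}

static int vf_ena_single_q(unsigned long long *ena_map, unsigned int vf_q_id)
{
	int err;

	if (*ena_map & (1ULL << vf_q_id))
		return 0;		/* already enabled, nothing to do */

	err = enable_hw_queue(vf_q_id);
	if (err)
		return err;

	*ena_map |= 1ULL << vf_q_id;	/* record success only */
	return 0;
}

int main(void)
{
	unsigned long long ena_map = 0;

	vf_ena_single_q(&ena_map, 0);
	vf_ena_single_q(&ena_map, 3);
	vf_ena_single_q(&ena_map, 3);	/* second call is a no-op */
	printf("enabled queues bitmap: 0x%llx\n", ena_map);
	return 0;
}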
*/ -static void ice_vf_vsi_ena_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id, u16 vf_q_id) +static void +ice_vf_vsi_ena_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, + u16 q_id, u16 vf_q_id) { if (test_bit(vf_q_id, vf->txq_ena)) return; @@ -5237,7 +3101,13 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) } tc = 0; + /* Grab VSI pointer again since it may have been modified */ vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + q_map = vqs->tx_queues; for_each_set_bit(vf_q_id, &q_map, ICE_MAX_DFLT_QS_PER_VF) { u16 vsi_id, q_id; @@ -5273,24 +3143,27 @@ error_param: } /** - * ice_vf_vsi_dis_single_txq - disable a single Tx queue for the VF based on relative queue ID + * ice_vf_vsi_dis_single_txq - disable a single Tx queue * @vf: VF to disable queue for * @vsi: VSI for the VF * @q_id: VSI relative (0-based) queue ID * @vf_q_id: VF relative (0-based) queue ID * - * Attempt to disable the Tx queue passed in. If the Tx queue was successfully disabled then clear - * q_id bit in the enabled queues bitmap and return success. Otherwise return error. + * Attempt to disable the Tx queue passed in. If the Tx queue was successfully + * disabled then clear q_id bit in the enabled queues bitmap and return + * success. Otherwise return error. */ -static int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id, u16 vf_q_id) +static int +ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, + u16 q_id, u16 vf_q_id) { struct ice_txq_meta txq_meta = { 0 }; struct ice_ring *ring; int err; - /* Skip queue if not enabled */ if (!test_bit(vf_q_id, vf->txq_ena)) - return 0; + dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n", + q_id, vsi->vsi_num); ring = vsi->tx_rings[q_id]; if (!ring) @@ -5298,7 +3171,8 @@ static int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 ice_fill_txq_meta(vsi, ring, &txq_meta); - err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta); + err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, + &txq_meta); if (err) { dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n", q_id, vsi->vsi_num); @@ -5323,7 +3197,9 @@ static int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 * Otherwise return error. 
*/ -static int ice_vf_vsi_dis_single_rxq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id, u16 vf_q_id) +static int +ice_vf_vsi_dis_single_rxq(struct ice_vf *vf, struct ice_vsi *vsi, + u16 q_id, u16 vf_q_id) { int err; @@ -5415,6 +3291,10 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) tc = 0; /* Reset VSI pointer as it was assigned to ADQ VSIs */ vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } /* speed up Rx queue disable by batching them if possible */ if (q_map && bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_DFLT_QS_PER_VF)) { @@ -5479,14 +3359,13 @@ error_param: * ice_cfg_interrupt * @vf: pointer to the VF info * @vsi: the VSI being configured - * @vector_id: vector ID * @tc: traffic class number for ADQ * @map: vector map for mapping vectors to queues * @q_vector: structure for interrupt vector * configure the IRQ to queue map */ static int -ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, +ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u8 __maybe_unused tc, struct virtchnl_vector_map *map, struct ice_q_vector *q_vector) { @@ -5510,7 +3389,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, q_vector->num_ring_rx++; q_vector->rx.itr_idx = map->rxitr_idx; vsi->rx_rings[vsi_q_id]->q_vector = q_vector; - ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id, + ice_cfg_rxq_interrupt(vsi, vsi_q_id, + q_vector->v_idx + vsi->base_vector, q_vector->rx.itr_idx); } @@ -5528,7 +3408,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, q_vector->num_ring_tx++; q_vector->tx.itr_idx = map->txitr_idx; vsi->tx_rings[vsi_q_id]->q_vector = q_vector; - ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id, + ice_cfg_txq_interrupt(vsi, vsi_q_id, + q_vector->v_idx + vsi->base_vector, q_vector->tx.itr_idx); } @@ -5562,7 +3443,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) * there is actually at least a single VF queue vector mapped */ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || - pf->num_msix_per_vf < num_q_vectors_mapped || + pf->vfs.num_msix_per < num_q_vectors_mapped || !num_q_vectors_mapped) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -5592,7 +3473,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) /* vector_id is always 0-based for each VF, and can never be * larger than or equal to the max allowed interrupts per VF */ - if (!(vector_id < pf->num_msix_per_vf) || + if (!(vector_id < pf->vfs.num_msix_per) || !ice_vc_isvalid_vsi_id(vf, vsi_id) || (!vector_id && (map->rxq_map || map->txq_map))) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -5603,9 +3484,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) if (!vector_id) continue; - /* Subtract non queue vector from vector_id passed by VF - * to get actual number of VSI queue vector array index - */ if (tc && ice_is_vf_adq_ena(vf)) vector_id_ch = vector_id - vf->ch[tc].offset; else @@ -5621,17 +3499,16 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - q_vector = vsi->q_vectors[vector_id_ch - ICE_NONQ_VECS_VF]; + q_vector = vf->vf_ops->get_q_vector(vf, vsi, vector_id_ch); + if (!q_vector) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } /* lookout for the invalid queue index */ - v_ret = (enum virtchnl_status_code) - ice_cfg_interrupt(vf, vsi, vector_id, tc, map, - q_vector); + ice_cfg_interrupt(vf, vsi, tc, map, q_vector); if (v_ret) goto error_param; @@ -5650,11 +3527,11 @@ 
error_param: } /** - * ice_vc_get_max_allowed_qpairs - get max allowed queue pairs based on negotiated capabilities + * ice_vc_get_max_allowed_qpairs - get max allowed queue pairs * @vf: VF used to get max queue pairs allowed * - * The maximum allowed queues is determined based on whether VIRTCHNL_VF_LARGE_NUM_QPAIRS was - * negotiated. + * The maximum allowed queues is determined based on whether + * VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated. */ static int ice_vc_get_max_allowed_qpairs(struct ice_vf *vf) { @@ -5664,6 +3541,137 @@ static int ice_vc_get_max_allowed_qpairs(struct ice_vf *vf) return ICE_MAX_DFLT_QS_PER_VF; } +/** + * ice_vc_cfg_q_bw - Configure per queue bandwidth + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer which holds the command descriptor + * + * Configure VF queues bandwidth. + */ +static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_queues_bw_cfg *qbw = + (struct virtchnl_queues_bw_cfg *)msg; + struct ice_vf_qs_bw *qs_bw; + struct ice_vsi *vsi; + size_t len; + u16 i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || + !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi || vsi->vsi_num != qbw->vsi_id) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (qbw->num_queues > ice_vc_get_max_allowed_qpairs(vf) || + qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { + dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n", + vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + len = sizeof(struct ice_vf_qs_bw) * qbw->num_queues; + qs_bw = kzalloc(len, GFP_KERNEL); + if (!qs_bw) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + goto err_bw; + } + + for (i = 0; i < qbw->num_queues; i++) { + qs_bw[i].queue_id = qbw->cfg[i].queue_id; + qs_bw[i].peak = qbw->cfg[i].shaper.peak; + qs_bw[i].committed = qbw->cfg[i].shaper.committed; + qs_bw[i].tc = qbw->cfg[i].tc; + } + + memcpy(vf->qs_bw, qs_bw, len); + +err_bw: + kfree(qs_bw); + +err: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW, + v_ret, NULL, 0); +} + +/** + * ice_vc_cfg_q_quanta - Configure per queue quanta + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer which holds the command descriptor + * + * Configure VF queues quanta. 
+ */ +static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg) +{ + u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_quanta_cfg *qquanta = + (struct virtchnl_quanta_cfg *)msg; + struct ice_vsi *vsi; + int ret; + + start_qid = qquanta->queue_select.start_queue_id; + num_queues = qquanta->queue_select.num_queues; + quanta_size = qquanta->quanta_size; + end_qid = start_qid + num_queues; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (end_qid > ice_vc_get_max_allowed_qpairs(vf) || + end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { + dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n", + vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (quanta_size > ICE_MAX_QUANTA_SIZE || + quanta_size < ICE_MIN_QUANTA_SIZE) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (quanta_size % 64) { + dev_err(ice_pf_to_dev(vf->pf), "quanta size should be the product of 64\n"); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size, + &quanta_prof_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + goto err; + } + + for (i = start_qid; i < end_qid; i++) + vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id; + +err: + /* send the response to the VF */ + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA, + v_ret, NULL, 0); + return ret; +} + /** * ice_vc_cfg_qs_msg * @vf: pointer to the VF info @@ -5673,30 +3681,23 @@ static int ice_vc_get_max_allowed_qpairs(struct ice_vf *vf) */ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) { - enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; + struct ice_vsi *vsi = NULL; u16 queue_id_tmp, tc; - int i, q_idx; + int i = -1, q_idx; - if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) goto error_param; - } - if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) goto error_param; - } vsi = ice_get_vf_vsi(vf); - if (!vsi) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!vsi) goto error_param; - } /* check for number of queues is done in ice_alloc_vf_res() function * for ADQ @@ -5708,7 +3709,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { dev_err(ice_pf_to_dev(pf), "VF-%d trying to configure more than allocated number of queues: %d\n", vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -5721,10 +3721,8 @@ skip_num_queues_check: if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) || vf->dcf_vlan_info.outer_stripping_ena || - vf->vlan_strip_ena) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + vf->vlan_strip_ena) goto error_param; - } } for (i = 0; i < qci->num_queue_pairs; i++) { qpi = &qci->qpair[i]; @@ -5737,20 +3735,15 @@ skip_num_queues_check: qpi->txq.headwb_enabled || !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || 
!ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || - !ice_vc_isvalid_q_id(vf, qci->vsi_id, - qpi->txq.queue_id)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) goto error_param; - } skip_non_adq_checks: if (ice_is_vf_adq_ena(vf)) { q_idx = queue_id_tmp; - vsi = ice_find_vsi_from_id(vf->pf, vf->ch[tc].vsi_num); - if (!vsi) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + vsi = ice_find_vsi(vf->pf, vf->ch[tc].vsi_num); + if (!vsi) goto error_param; - } } else { q_idx = qpi->rxq.queue_id; } @@ -5759,17 +3752,23 @@ skip_non_adq_checks: * for selected "vsi" (which could be main VF VSI or * VF ADQ VSI */ - if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) goto error_param; - } /* copy Tx queue info from VF into VSI */ if (qpi->txq.ring_len > 0) { vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr; vsi->tx_rings[q_idx]->count = qpi->txq.ring_len; + + /* Disable any existing queue first */ + if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx, + qpi->txq.queue_id)) + goto error_param; + + /* Configure a queue with the requested settings */ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n", + vf->vf_id, i); goto error_param; } } @@ -5782,51 +3781,53 @@ skip_non_adq_checks: vsi->rx_rings[q_idx]->dma = qpi->rxq.dma_ring_addr; vsi->rx_rings[q_idx]->count = qpi->rxq.ring_len; - vsi->rx_rings[q_idx]->rx_crc_strip_dis = qpi->rxq.crc_disable; + if (qpi->rxq.crc_disable) + vsi->rx_rings[q_idx]->flags |= + ICE_RX_FLAGS_CRC_STRIP_DIS; + else + vsi->rx_rings[q_idx]->flags &= + ~ICE_RX_FLAGS_CRC_STRIP_DIS; if (qpi->rxq.databuffer_size != 0 && (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || - qpi->rxq.databuffer_size < 1024)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + qpi->rxq.databuffer_size < 1024)) goto error_param; - } + vsi->rx_buf_len = qpi->rxq.databuffer_size; vsi->rx_rings[q_idx]->rx_buf_len = vsi->rx_buf_len; if (qpi->rxq.max_pkt_size > max_frame_size || - qpi->rxq.max_pkt_size < 64) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + qpi->rxq.max_pkt_size < 64) goto error_param; - } vsi->max_frame = qpi->rxq.max_pkt_size; - /* add space for the port VLAN since the VF driver is not - * expected to account for it in the MTU calculation + /* add space for the port VLAN since the VF driver is + * not expected to account for it in the MTU + * calculation */ if (ice_vf_is_port_vlan_ena(vf)) vsi->max_frame += VLAN_HLEN; if (ice_vsi_cfg_single_rxq(vsi, q_idx)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n", + vf->vf_id, i); goto error_param; } - /* If Rx flex desc is supported, select RXDID for Rx queues. - * Otherwise, use legacy 32byte descriptor format. - * Legacy 16byte descriptor is not supported. If this RXDID - * is selected, return error. + /* If Rx flex desc is supported, select RXDID for Rx + * queues. Otherwise, use legacy 32byte descriptor + * format. Legacy 16byte descriptor is not supported. + * If this RXDID is selected, return error. 
*/ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) { rxdid = qpi->rxq.rxdid; - if (!(BIT(rxdid) & pf->supported_rxdids)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!(BIT(rxdid) & pf->supported_rxdids)) goto error_param; - } } else { rxdid = ICE_RXDID_LEGACY_1; } - - ice_write_qrxflxp_cntxt(&vsi->back->hw, vsi->rxq_map[q_idx], rxdid, 0x03, - false); + ice_write_qrxflxp_cntxt(&vsi->back->hw, + vsi->rxq_map[q_idx], + rxdid, 0x03, false); } /* For ADQ there can be up to 4 VSIs with max 4 queues each. @@ -5845,19 +3846,31 @@ skip_non_adq_checks: } } -error_param: - /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret, - NULL, 0); -} + if (ice_vf_cfg_qs_bw(vf, qci->num_queue_pairs)) + goto error_param; -/** - * ice_is_vf_trusted - * @vf: pointer to the VF info - */ -static bool ice_is_vf_trusted(struct ice_vf *vf) -{ - return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); +error_param: + /* disable whatever we can */ + if (vsi) { + for (; i >= 0; i--) { + qpi = &qci->qpair[i]; + + if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true)) + dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n", + vf->vf_id, i); + if (ice_vf_vsi_dis_single_txq(vf, vsi, i, + qpi->txq.queue_id)) + dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n", + vf->vf_id, i); + } + } + + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + VIRTCHNL_STATUS_ERR_PARAM, NULL, 0); } /** @@ -5882,8 +3895,7 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf) * ice_vc_ether_addr_type - get type of virtchnl_ether_addr * @vc_ether_addr: used to extract the type */ -static u8 -ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr) +static u8 ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr) { return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK); } @@ -5892,8 +3904,7 @@ ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr) * ice_is_vc_addr_legacy - check if the MAC address is from an older VF * @vc_ether_addr: VIRTCHNL structure that contains MAC and type */ -static bool -ice_is_vc_addr_legacy(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr) +static bool ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr) { u8 type = ice_vc_ether_addr_type(vc_ether_addr); @@ -5907,8 +3918,7 @@ ice_is_vc_addr_legacy(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr) * This function should only be called when the MAC address in * virtchnl_ether_addr is a valid unicast MAC */ -static bool -ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr) +static bool ice_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr) { u8 type = ice_vc_ether_addr_type(vc_ether_addr); @@ -5964,7 +3974,6 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, { struct device *dev = ice_pf_to_dev(vf->pf); u8 *mac_addr = vc_ether_addr->addr; - enum ice_status status; int ret = 0; /* device MAC already added */ @@ -5975,18 +3984,17 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, return -EPERM; } - status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); - if (status == ICE_ERR_ALREADY_EXISTS) { - dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr, - vf->vf_id); + ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); + if (ret == -EEXIST) { + dev_dbg(dev, "MAC %pM 
already exists for VF %d\n", + mac_addr, vf->vf_id); /* don’t return since we might need to update * the primary MAC in ice_vfhw_mac_add() below */ - ret = -EEXIST; - } else if (status) { - dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n", mac_addr, - vf->vf_id, ice_stat_str(status)); - return -EIO; + } else if (ret) { + dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n", + mac_addr, vf->vf_id, ret); + return ret; } else { vf->num_mac++; } @@ -6008,6 +4016,26 @@ static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac) ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME); } +/** + * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF + * @vf: VF to update + * @vc_ether_addr: structure from VIRTCHNL with MAC to check + * + * only update cached hardware MAC for legacy VF drivers on delete + * because we cannot guarantee order/type of MAC from the VF driver + */ +static void +ice_update_legacy_cached_mac(struct ice_vf *vf, + struct virtchnl_ether_addr *vc_ether_addr) +{ + if (!ice_is_vc_addr_legacy(vc_ether_addr) || + ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) + return; + + ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr); + ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr); +} + /** * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed * @vf: VF to update @@ -6029,16 +4057,7 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) */ eth_zero_addr(vf->dev_lan_addr.addr); - /* only update cached hardware MAC for legacy VF drivers on delete - * because we cannot guarantee order/type of MAC from the VF driver - */ - if (ice_is_vc_addr_legacy(vc_ether_addr) && - !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) { - ether_addr_copy(vf->dev_lan_addr.addr, - vf->legacy_last_added_umac.addr); - ether_addr_copy(vf->hw_lan_addr.addr, - vf->legacy_last_added_umac.addr); - } + ice_update_legacy_cached_mac(vf, vc_ether_addr); } /** @@ -6053,24 +4072,23 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, { struct device *dev = ice_pf_to_dev(vf->pf); u8 *mac_addr = vc_ether_addr->addr; - enum ice_status status; + int status; if (!ice_can_vf_change_mac(vf) && ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) return 0; status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI); - if (status == ICE_ERR_DOES_NOT_EXIST) { + if (status == -ENOENT) { dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr, vf->vf_id); return -ENOENT; } else if (status) { - dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n", - mac_addr, vf->vf_id, ice_stat_str(status)); + dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n", + mac_addr, vf->vf_id, status); return -EIO; } - ice_vfhw_mac_del(vf, vc_ether_addr); vf->num_mac--; @@ -6223,11 +4241,12 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) req_queues - cur_queues > tx_rx_queue_left) { dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n", vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); - vfres->num_queue_pairs = min_t(u16, max_avail_vf_qps, max_allowed_vf_qps); + vfres->num_queue_pairs = min_t(u16, max_avail_vf_qps, + max_allowed_vf_qps); } else { /* request is successful, then reset VF */ vf->num_req_qs = req_queues; - ice_vc_reset_vf(vf); + ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); dev_info(dev, "VF %d granted request of %u queues.\n", vf->vf_id, req_queues); return 0; @@ -6239,105 +4258,6 @@ error_param: v_ret, (u8 *)vfres, sizeof(*vfres)); } -/** - * 
ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported - * @hw: hardware structure used to check the VLAN mode - * @vlan_proto: VLAN TPID being checked - * - * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q - * and ETH_P_8021AD are supported. If the device is configured in Single VLAN - * Mode (SVM), then only ETH_P_8021Q is supported. - */ -static bool -ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto) -{ - bool is_supported = false; - - switch (vlan_proto) { - case ETH_P_8021Q: - is_supported = true; - break; - case ETH_P_8021AD: - if (ice_is_dvm_ena(hw)) - is_supported = true; - break; - } - - return is_supported; -} - -#ifdef IFLA_VF_VLAN_INFO_MAX -/** - * ice_set_vf_port_vlan - * @netdev: network interface device structure - * @vf_id: VF identifier - * @vlan_id: VLAN ID being set - * @qos: priority setting - * @vlan_proto: VLAN protocol - * - * program VF Port VLAN ID and/or QoS - */ -int -ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, - __be16 vlan_proto) -#else -int -ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos) -#endif /* IFLA_VF_VLAN_INFO_MAX */ -{ - struct ice_pf *pf = ice_netdev_to_pf(netdev); -#ifdef IFLA_VF_VLAN_INFO_MAX - u16 local_vlan_proto = ntohs(vlan_proto); -#else - u16 local_vlan_proto = ETH_P_8021Q; -#endif - struct device *dev; - struct ice_vf *vf; - int ret; - - dev = ice_pf_to_dev(pf); - if (ice_validate_vf_id(pf, vf_id)) - return -EINVAL; - - if (vlan_id >= VLAN_N_VID || qos > 7) { - dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n", - vf_id, vlan_id, qos); - return -EINVAL; - } - - if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) { - dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n", - local_vlan_proto); - return -EPROTONOSUPPORT; - } - - vf = &pf->vf[vf_id]; - ret = ice_check_vf_ready_for_cfg(vf); - if (ret) - return ret; - - if (ice_vf_get_port_vlan_prio(vf) == qos && - ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto && - ice_vf_get_port_vlan_id(vf) == vlan_id) { - /* duplicate request, so just return success */ - dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n", - vlan_id, qos, local_vlan_proto); - return 0; - } - - vf->port_vlan_info = - ICE_VLAN(local_vlan_proto, vlan_id, qos, ICE_FWD_TO_VSI); - if (ice_vf_is_port_vlan_ena(vf)) - dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n", - vlan_id, qos, local_vlan_proto, vf_id); - else - dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id); - - ice_vc_reset_vf(vf); - - return 0; -} - /** * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads * @caps: VF driver negotiated capabilities @@ -6365,42 +4285,57 @@ static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf) /** * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN + * @vf: VF to enable VLAN promisc on * @vsi: VF's VSI used to enable VLAN promiscuous mode * @vlan: VLAN used to enable VLAN promiscuous * * This function should only be called if VLAN promiscuous mode is allowed, * which can be determined via ice_is_vlan_promisc_allowed(). 
*/ -static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan) +static int +ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi, + struct ice_vlan *vlan) { - u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX; - enum ice_status status; + u8 promisc_m = 0; + int status; + + if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) + promisc_m |= ICE_VF_UCAST_VLAN_PROMISC_BITS; + if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) + promisc_m |= ICE_MCAST_VLAN_PROMISC_BITS; + + if (!promisc_m) + return 0; status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, vlan->vid, vsi->port_info->lport); - if (status && status != ICE_ERR_ALREADY_EXISTS) - return ice_status_to_errno(status); + if (status && status != -EEXIST) + return status; return 0; } /** * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN + * @vf: VF to disable VLAN promisc on * @vsi: VF's VSI used to disable VLAN promiscuous mode for * @vlan: VLAN used to disable VLAN promiscuous * * This function should only be called if VLAN promiscuous mode is allowed, * which can be determined via ice_is_vlan_promisc_allowed(). */ -static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan) +static int +ice_vf_dis_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi, + struct ice_vlan *vlan) { - u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX; - enum ice_status status; + u8 promisc_m = ICE_VF_UCAST_VLAN_PROMISC_BITS | + ICE_MCAST_VLAN_PROMISC_BITS; + int status; status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, vlan->vid, vsi->port_info->lport); - if (status && status != ICE_ERR_DOES_NOT_EXIST) - return ice_status_to_errno(status); + if (status && status != -ENOENT) + return status; return 0; } @@ -6535,16 +4470,19 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) { if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n", - vid, status); + ice_dev_err_errno(dev, status, + "Enable VLAN pruning on VLAN ID: %d failed", + vid); goto error_param; } } else if (vlan_promisc) { - status = ice_vf_ena_vlan_promisc(vsi, &vlan); + status = ice_vf_ena_vlan_promisc(vf, vsi, + &vlan); if (status) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n", - vid, status); + ice_dev_err_errno(dev, status, + "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed", + vid); } } } @@ -6582,7 +4520,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) vsi->inner_vlan_ops.dis_rx_filtering(vsi); if (vlan_promisc) - ice_vf_dis_vlan_promisc(vsi, &vlan); + ice_vf_dis_vlan_promisc(vf, vsi, &vlan); } } @@ -6629,7 +4567,7 @@ static bool ice_vsi_is_rxq_crc_strip_dis(struct ice_vsi *vsi) u16 i; for (i = 0; i < vsi->alloc_rxq; i++) - if (vsi->rx_rings[i]->rx_crc_strip_dis) + if (vsi->rx_rings[i]->flags & ICE_RX_FLAGS_CRC_STRIP_DIS) return true; return false; @@ -6657,6 +4595,11 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) } vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + if (ice_vsi_is_rxq_crc_strip_dis(vsi)) { v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; goto error_param; @@ -6746,911 +4689,6 @@ err: return ret; } -#ifdef HAVE_TC_SETUP_CLSFLOWER -/** - * ice_validate_cloud_filter - * @vf: pointer to the VF info - 
* @tc_filter: pointer to virtchnl_filter - * - * This function validates cloud filter programmed as TC filter for ADQ - */ -static int -ice_validate_cloud_filter(struct ice_vf *vf, struct virtchnl_filter *tc_filter) -{ - struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; - struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; - struct ice_pf *pf = vf->pf; - struct device *dev; - - dev = ice_pf_to_dev(pf); - if (!tc_filter->action) { - dev_err(dev, "VF %d: Currently ADQ doesn't support Drop Action\n", - vf->vf_id); - return -EOPNOTSUPP; - } - - /* Check filter if it's programmed for advanced mode or basic mode. - * There are two ADQ modes (for VF only), - * 1. Basic mode: intended to allow as many filter options as possible - * to be added to a VF in Non-trusted mode. Main goal is - * to add filters to its own MAC and VLAN ID. - * 2. Advanced mode: is for allowing filters to be applied other than - * its own MAC or VLAN. This mode requires the VF to be - * Trusted. - */ - if (mask.dst_mac[0] && !mask.dst_ip[0]) { - /* As of now supporting, MAC filter if MAC address is the - * default LAN addr for this VF - */ - if (!ice_mac_fltr_exist(&pf->hw, data.dst_mac, - vf->lan_vsi_idx)) { - dev_err(dev, "Destination MAC %pM doesn't belong to VF %d\n", - data.dst_mac, vf->vf_id); - return -EINVAL; - } - } else if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { - /* Check if VF is trusted */ - dev_err(dev, "VF %d not trusted, make VF trusted to add ADQ filters\n", - vf->vf_id); - return -EOPNOTSUPP; - } - - if (mask.dst_mac[0] & data.dst_mac[0]) { - if (is_broadcast_ether_addr(data.dst_mac) || - is_zero_ether_addr(data.dst_mac)) { - dev_err(dev, "VF %d: Invalid Dest MAC addr %pM\n", - vf->vf_id, data.dst_mac); - return -EINVAL; - } - } - - if (mask.src_mac[0] & data.src_mac[0]) { - if (is_broadcast_ether_addr(data.src_mac) || - is_zero_ether_addr(data.src_mac)) { - dev_err(dev, "VF %d: Invalid Source MAC addr %pM\n", - vf->vf_id, data.src_mac); - return -EINVAL; - } - } - - if (mask.dst_port & data.dst_port) { - if (!data.dst_port) { - dev_err(dev, "VF %d: Invalid Dest port\n", vf->vf_id); - return -EINVAL; - } - } - - if (mask.src_port & data.src_port) { - if (!data.src_port) { - dev_err(dev, "VF %d: Invalid Source port\n", vf->vf_id); - return -EINVAL; - } - } - - if (mask.vlan_id & data.vlan_id) { - if (ntohs(data.vlan_id) >= VLAN_N_VID) { - dev_err(dev, "VF %d: invalid VLAN ID\n", vf->vf_id); - return -EINVAL; - } - /* Validate VLAN for the VF the same way we do for the PF */ - if (!ice_vlan_fltr_exist(&pf->hw, ntohs(data.vlan_id), - vf->lan_vsi_idx)) { - dev_err(dev, "specified VLAN %u doesn't belong to this VF %d\n", - ntohs(data.vlan_id), vf->vf_id); - return -EINVAL; - } - } - - return 0; -} - -/** - * ice_get_tc_flower_fltr - locate the TC flower filter - * @vf: pointer to the VF info - * @fltr: pointer to the tc_flower filter - * @mask: ptr to filter mask (representing filter data specification) - * - * This function is used to locate specific filter in filter list. 
It returns - * NULL if unable to locate such filter otherwise returns found filter - */ -static struct ice_tc_flower_fltr * -ice_get_tc_flower_fltr(struct ice_vf *vf, struct ice_tc_flower_fltr *fltr, - struct virtchnl_l4_spec *mask) -{ - struct ice_tc_flower_lyr_2_4_hdrs *hdrs; - struct ice_tc_l2_hdr *l2_key; - struct ice_tc_l3_hdr *l3_key; - struct ice_tc_l4_hdr *l4_key; - struct ice_tc_flower_fltr *f; - struct hlist_node *node; - - hdrs = &fltr->outer_headers; - if (!hdrs) - return NULL; - - l2_key = &hdrs->l2_key; - l3_key = &hdrs->l3_key; - l4_key = &hdrs->l4_key; - - hlist_for_each_entry_safe(f, node, - &vf->tc_flower_fltr_list, tc_flower_node) { - struct ice_tc_flower_lyr_2_4_hdrs *f_hdrs; - - if (!f->dest_vsi || fltr->dest_vsi != f->dest_vsi || - fltr->dest_vsi->idx != f->dest_vsi->idx) - continue; - - f_hdrs = &f->outer_headers; - - /* handle L2 fields if specified and do not match */ - if ((mask->src_mac[0] && - !ether_addr_equal(l2_key->src_mac, - f_hdrs->l2_key.src_mac)) || - (mask->dst_mac[0] && - !ether_addr_equal(l2_key->dst_mac, - f_hdrs->l2_key.dst_mac))) - continue; - - /* handle VLAN if specified and do not match */ - if (mask->vlan_id && hdrs->vlan_hdr.vlan_id != - f_hdrs->vlan_hdr.vlan_id) - continue; - - /* handle L3 IPv4 if specified and do not match - * for ipv4 data to be valid, check only first dword of mask - */ - if (l2_key->n_proto == ETH_P_IP) - if ((mask->dst_ip[0] && - l3_key->dst_ipv4 != f_hdrs->l3_key.dst_ipv4) || - (mask->src_ip[0] && - l3_key->src_ipv4 != f_hdrs->l3_key.src_ipv4)) - continue; - - /* handle L3 IPv6 if specified and do not match - * for ipv6 to be valid, last dword from mask must be valid - * hence check only last dword of mask - */ - if (l2_key->n_proto == ETH_P_IPV6 && mask->dst_ip[3]) - if (memcmp(&l3_key->ip.v6.dst_ip6, - &f_hdrs->l3_key.ip.v6.dst_ip6, - sizeof(l3_key->ip.v6.dst_ip6))) - continue; - if (l2_key->n_proto == ETH_P_IPV6 && mask->src_ip[3]) - if (memcmp(&l3_key->ip.v6.src_ip6, - &f_hdrs->l3_key.ip.v6.src_ip6, - sizeof(l3_key->ip.v6.src_ip6))) - continue; - - /* make sure "ip_proto" is same */ - if (l3_key->ip_proto != f_hdrs->l3_key.ip_proto) - continue; - - /* handle L4 fields if specified and do not match */ - if ((mask->dst_port && - l4_key->dst_port != f_hdrs->l4_key.dst_port) || - (mask->src_port && - l4_key->src_port != f_hdrs->l4_key.src_port)) - continue; - - /* if reached here, means found matching filter entry */ - return f; - } - - return NULL; -} - -/** - * ice_vc_chnl_fltr_state_verify - verify general state of VF - * @vf: pointer to the VF info - * @vcf: pointer to virtchannel_filter - * - * This function performs general validation including validation of filter - * message and content - */ -static enum virtchnl_status_code -ice_vc_chnl_fltr_state_verify(struct ice_vf *vf, struct virtchnl_filter *vcf) -{ - struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - u32 max_tc_allowed; - struct device *dev; - - dev = ice_pf_to_dev(pf); - if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) - return VIRTCHNL_STATUS_ERR_PARAM; - - if (!ice_is_vf_adq_ena(vf)) { - dev_err(dev, "VF %d: ADQ is not enabled, can't apply switch filter\n", - vf->vf_id); - return VIRTCHNL_STATUS_ERR_PARAM; - } - - vsi = ice_get_vf_vsi(vf); - if (!vsi) { - dev_err(dev, "VF %d: No corresponding VF VSI\n", vf->vf_id); - return VIRTCHNL_STATUS_ERR_PARAM; - } - - max_tc_allowed = ice_vc_get_max_chnl_tc_allowed(vf); - if (vcf->action == VIRTCHNL_ACTION_TC_REDIRECT && - vcf->action_meta >= max_tc_allowed) { - dev_err(dev, "VF %d: Err: 
action(%u)_meta(TC): %u >= max_tc_allowed (%u)\n", - vf->vf_id, vcf->action, vcf->action_meta, - max_tc_allowed); - return VIRTCHNL_STATUS_ERR_PARAM; - } - - /* enforce supported flow_type based on negotiated capability */ - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2) { - if (!(vcf->flow_type == VIRTCHNL_TCP_V4_FLOW || - vcf->flow_type == VIRTCHNL_TCP_V6_FLOW || - vcf->flow_type == VIRTCHNL_UDP_V4_FLOW || - vcf->flow_type == VIRTCHNL_UDP_V6_FLOW)) { - dev_err(ice_pf_to_dev(pf), "VF %d: Invalid input/s, unsupported flow_type %u\n", - vf->vf_id, vcf->flow_type); - return VIRTCHNL_STATUS_ERR_PARAM; - } - } else { - if (!(vcf->flow_type == VIRTCHNL_TCP_V4_FLOW || - vcf->flow_type == VIRTCHNL_TCP_V6_FLOW)){ - dev_err(ice_pf_to_dev(pf), "VF %d: Invalid input/s, unsupported flow_type %u\n", - vf->vf_id, vcf->flow_type); - return VIRTCHNL_STATUS_ERR_PARAM; - } - } - - if (ice_validate_cloud_filter(vf, vcf)) { - dev_err(dev, "VF %d: Invalid input/s, can't apply switch filter\n", - vf->vf_id); - return VIRTCHNL_STATUS_ERR_PARAM; - } - - /* filter state fully verified, return SUCCESS */ - return VIRTCHNL_STATUS_SUCCESS; -} - -/** - * ice_setup_fltr - populate fields in TC flower filter structure - * @vf: ptr to VF - * @vcf: ptr to virt channel message - * @fltr: pointer to the TC filter structure - * @dest_vsi: pointer to destination VSI for filter - * @tc_class: TC number when action type to FWD_TO_VSI, counter index when - * action is count, queue number when action is FWD_TO_QUEUE, - * queue group ID when action is FWD_TO_QGRP - */ -static void -ice_setup_fltr(struct ice_vf *vf, struct ice_tc_flower_fltr *fltr, - struct virtchnl_filter *vcf, struct ice_vsi *dest_vsi, - int tc_class) -{ - struct virtchnl_l4_spec *mask = &vcf->mask.tcp_spec; - struct virtchnl_l4_spec *tcf = &vcf->data.tcp_spec; - struct ice_tc_flower_lyr_2_4_hdrs *hdrs; - - memset(fltr, 0, sizeof(*fltr)); - - hdrs = &fltr->outer_headers; - if (!hdrs) - return; - - /* copy L2 MAC address and MAC mask */ - ether_addr_copy(hdrs->l2_key.dst_mac, tcf->dst_mac); - ether_addr_copy(hdrs->l2_mask.dst_mac, mask->dst_mac); - if (!is_zero_ether_addr(hdrs->l2_key.dst_mac)) - fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC; - - /* copy L2 source address and MAC mask */ - ether_addr_copy(hdrs->l2_key.src_mac, tcf->src_mac); - ether_addr_copy(hdrs->l2_mask.src_mac, mask->src_mac); - if (!is_zero_ether_addr(hdrs->l2_key.src_mac)) - fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC; - - /* copy VLAN info */ - hdrs->vlan_hdr.vlan_id = mask->vlan_id & tcf->vlan_id; - if (hdrs->vlan_hdr.vlan_id) - fltr->flags |= ICE_TC_FLWR_FIELD_VLAN; - - /* copy L4 fields */ - hdrs->l4_key.dst_port = mask->dst_port & tcf->dst_port; - hdrs->l4_mask.dst_port = mask->dst_port; - if (hdrs->l4_key.dst_port) - fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT; - - hdrs->l4_key.src_port = mask->src_port & tcf->src_port; - hdrs->l4_mask.src_port = mask->src_port; - if (hdrs->l4_key.src_port) - fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT; - - /* copy L3 fields, IPv4[6] */ - if (vcf->flow_type == VIRTCHNL_TCP_V4_FLOW || - vcf->flow_type == VIRTCHNL_UDP_V4_FLOW) { - struct ice_tc_l3_hdr *key, *msk; - - key = &hdrs->l3_key; - msk = &hdrs->l3_mask; - - /* set n_proto based on flow_type */ - hdrs->l2_key.n_proto = ETH_P_IP; - if (mask->dst_ip[0] & tcf->dst_ip[0]) { - key->dst_ipv4 = tcf->dst_ip[0]; - msk->dst_ipv4 = mask->dst_ip[0]; - fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4; - } - if (mask->src_ip[0] & tcf->src_ip[0]) { - key->src_ipv4 = tcf->src_ip[0]; - msk->src_ipv4 = mask->src_ip[0]; - 
fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4; - } - } else if (vcf->flow_type == VIRTCHNL_TCP_V6_FLOW || - vcf->flow_type == VIRTCHNL_UDP_V6_FLOW) { - struct ice_tc_l3_hdr *key, *msk; - - key = &hdrs->l3_key; - msk = &hdrs->l3_mask; - - /* set n_proto based on flow_type */ - hdrs->l2_key.n_proto = ETH_P_IPV6; - if (mask->dst_ip[3] & tcf->dst_ip[3]) { - memcpy(&key->ip.v6.dst_ip6, tcf->dst_ip, - sizeof(key->ip.v6.dst_ip6)); - memcpy(&msk->ip.v6.dst_ip6, mask->dst_ip, - sizeof(msk->ip.v6.dst_ip6)); - fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6; - } - if (mask->src_ip[3] & tcf->src_ip[3]) { - memcpy(&key->ip.v6.src_ip6, tcf->src_ip, - sizeof(key->ip.v6.src_ip6)); - memcpy(&msk->ip.v6.src_ip6, mask->src_ip, - sizeof(msk->ip.v6.src_ip6)); - fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6; - } - } - - /* get the VSI to which the TC belongs to */ - fltr->dest_vsi = dest_vsi; - if (vcf->action == VIRTCHNL_ACTION_TC_REDIRECT) - fltr->action.fltr_act = ICE_FWD_TO_VSI; - else - fltr->action.fltr_act = ICE_DROP_PACKET; - - /* make sure to include VF's MAC address when adding ADQ filter */ - if ((!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) && - fltr->action.fltr_act == ICE_FWD_TO_VSI) { - fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC; - ether_addr_copy(hdrs->l2_key.dst_mac, vf->dev_lan_addr.addr); - eth_broadcast_addr(hdrs->l2_mask.dst_mac); - } - - /* 'tc_class' could be TC/QUEUE/QUEUE_GRP number */ - fltr->action.tc_class = tc_class; - - /* must to set the tunnel_type to be INVALID, otherwise if left as zero, - * it gets treated as VxLAN tunnel since definition of VxLAN tunnel - * type is zero - */ - fltr->tunnel_type = TNL_LAST; - - /* set ip_proto in headers based on flow_type which is part of VIRTCHNL - * message, "add filter" - */ - if (vcf->flow_type == VIRTCHNL_TCP_V4_FLOW || - vcf->flow_type == VIRTCHNL_TCP_V6_FLOW) - hdrs->l3_key.ip_proto = IPPROTO_TCP; - else - hdrs->l3_key.ip_proto = IPPROTO_UDP; -} - -/** - * ice_vc_del_switch_filter - * @vf: pointer to the VF info - * @msg: pointer to the msg buffer - * - * This function deletes a cloud filter programmed as TC filter for ADQ - */ -static int ice_vc_del_switch_filter(struct ice_vf *vf, u8 *msg) -{ - struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; - struct virtchnl_l4_spec *mask = &vcf->mask.tcp_spec; - struct ice_rule_query_data rule; - enum virtchnl_status_code v_ret; - struct ice_tc_flower_fltr fltr; - struct ice_tc_flower_fltr *f; - struct ice_pf *pf = vf->pf; - struct ice_vsi *dest_vsi; - struct device *dev; - int err; - - dev = ice_pf_to_dev(pf); - /* Advanced switch filters and DCF are mutually exclusive. */ - if (ice_is_dcf_enabled(pf)) { - dev_err(dev, "Device Control Functionality is currently enabled. 
Advanced switch filters cannot be deleted.\n"); - v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; - goto err; - } - - v_ret = ice_vc_chnl_fltr_state_verify(vf, vcf); - if (v_ret) { - dev_err(dev, "VF %d: failed to verify ADQ state during filter message processing\n", - vf->vf_id); - goto err; - } - - dest_vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; - - /* prepare the TC flower filter based on input */ - ice_setup_fltr(vf, &fltr, vcf, dest_vsi, vcf->action_meta); - - /* locate the filter in VF tc_flower filter list */ - f = ice_get_tc_flower_fltr(vf, &fltr, mask); - if (!f) { - dev_err(dev, "VF %d: Invalid input/s, unable to locate filter due to mismatch\n", - vf->vf_id); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - /* Deleting TC filter */ - rule.rid = f->rid; - rule.rule_id = f->rule_id; - rule.vsi_handle = f->dest_id; - err = ice_rem_adv_rule_by_id(&pf->hw, &rule); - if (err) { - dev_err(dev, "VF %d: Failed to delete switch filter for tc %u, err %d\n", - vf->vf_id, vcf->action_meta, err); - v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; - goto err; - } - - /* book-keeping and update filter type if filter count reached zero */ - dest_vsi->num_chnl_fltr--; - - /* reset filter type for channel if channel filter - * count reaches zero - */ - if (!dest_vsi->num_chnl_fltr) - vf->ch[vcf->action_meta].fltr_type = ICE_CHNL_FLTR_TYPE_INVALID; - - hlist_del(&f->tc_flower_node); - devm_kfree(dev, f); - if (f->flags & ICE_TC_FLWR_FIELD_DST_MAC) - vf->num_dmac_chnl_fltrs--; - v_ret = VIRTCHNL_STATUS_SUCCESS; -err: - /* send the response back to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, v_ret, - NULL, 0); -} - -/** - * ice_vc_add_switch_filter - * @vf: pointer to the VF info - * @msg: pointer to the msg buffer - * - * This function adds a switch filter programmed as TC filter for ADQ - * - * General info about filtering mode: - * VF ADQ has two different modes when it comes to applying the switch - * filters - * 1. basic mode: only dst MAC and dst VLAN filters supported - * 2. advanced mode: all combination of filters including dst MAC and - * dst VLAN ex: - * a. dst IP + dst PORT - * b. dst MAC + src PORT - * c. dst MAC + dst PORT - * basic mode is for 'untrusted VFs' and advanced mode is only for - * 'trusted VFs'. When a VF is toggled from being 'trusted' to - * 'untrusted' we remove all filters irrespective if it's basic or - * advanced. - * when ADQ is enabled we need to do ice_down irrespective if VF is - * 'trusted' or not and delete switch filters only if a 'trusted' VF - * is made 'untrusted'. - */ -static int ice_vc_add_switch_filter(struct ice_vf *vf, u8 *msg) -{ - struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; - struct ice_tc_flower_fltr *fltr = NULL; - enum virtchnl_status_code v_ret; - struct ice_pf *pf = vf->pf; - struct ice_vsi *dest_vsi; - struct device *dev; - int ret; - - dev = ice_pf_to_dev(pf); - /* Advanced switch filters and DCF are mutually exclusive. */ - if (ice_is_dcf_enabled(pf)) { - dev_err(dev, "Device Control Functionality is currently enabled. 
Advanced switch filters cannot be added\n"); - v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; - goto err; - } - - v_ret = ice_vc_chnl_fltr_state_verify(vf, vcf); - if (v_ret) { - dev_err(dev, "VF %d: failed to verify ADQ state during filter message processing\n", - vf->vf_id); - goto err; - } - - dest_vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; - - fltr = devm_kzalloc(dev, sizeof(*fltr), GFP_KERNEL); - if (!fltr) { - v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; - goto err; - } - - /* prepare the TC flower filter based on input */ - ice_setup_fltr(vf, fltr, vcf, dest_vsi, vcf->action_meta); - - /* call function which adds advanced switch filter */ - ret = ice_add_tc_flower_adv_fltr(ice_get_vf_vsi(vf), fltr); - if (ret) { - dev_err(dev, "Failed to add TC Flower filter using advance filter recipe\n"); - v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; - devm_kfree(dev, fltr); - goto err; - } - - INIT_HLIST_NODE(&fltr->tc_flower_node); - hlist_add_head(&fltr->tc_flower_node, &vf->tc_flower_fltr_list); - if (fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC) - vf->num_dmac_chnl_fltrs++; - - v_ret = VIRTCHNL_STATUS_SUCCESS; - vf->adq_fltr_ena = true; - -err: - /* send the response back to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, v_ret, - NULL, 0); -} - -/** - * ice_conv_virtchnl_speed_to_mbps - * @virt_speed: virt speed that needs to be converted from - * - * convert virt channel speeds to mbps, return link speed on success, - * '0' otherwise - */ -static u32 ice_conv_virtchnl_speed_to_mbps(u16 virt_speed) -{ - u32 speed, link_speed; - - speed = ice_conv_link_speed_to_virtchnl(false, virt_speed); - - /* get link speed in MB to validate rate limit */ - switch (speed) { - case VIRTCHNL_LINK_SPEED_100MB: - link_speed = SPEED_100; - break; - case VIRTCHNL_LINK_SPEED_1GB: - link_speed = SPEED_1000; - break; - case VIRTCHNL_LINK_SPEED_10GB: - link_speed = SPEED_10000; - break; - case VIRTCHNL_LINK_SPEED_20GB: - link_speed = SPEED_20000; - break; - case VIRTCHNL_LINK_SPEED_25GB: - link_speed = SPEED_25000; - break; - case VIRTCHNL_LINK_SPEED_40GB: - link_speed = SPEED_40000; - break; - default: - /* on failure to detect link speed the expectation of the caller - * to this function is '0'. - */ - link_speed = 0; - break; - } - - return link_speed; -} - -/** - * ice_vc_add_qch_msg: Add queue channel and enable ADQ - * @vf: pointer to the VF info - * @msg: pointer to the msg buffer - */ -static int ice_vc_add_qch_msg(struct ice_vf *vf, u8 *msg) -{ - struct virtchnl_tc_info *tci = - (struct virtchnl_tc_info *)msg; - enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - struct ice_pf *pf = vf->pf; - int adq_request_qps = 0; - struct ice_link_status *ls; - u16 available_vsis = 0; - u64 total_max_rate = 0; - u32 max_tc_allowed; - struct device *dev; - u16 total_qs = 0; - u32 link_speed; - unsigned int i; - - dev = ice_pf_to_dev(pf); - ls = &pf->hw.port_info->phy.link_info; - - if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - /* check if VF has negotiated this capability before anything else */ - if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { - dev_dbg(dev, "VF %d attempting to enable ADQ, but hasn't properly negotiated that capability\n", - vf->vf_id); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - /* Currently ADQ and DCB are mutually exclusive and keeping in sync - * with PF, don't allow VF ADQ configuration when DCB Firmware LLDP - * agent is already running/enabled. 
- */ - if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) { - dev_err(dev, "FW LLDP is enabled, cannot enable ADQ on VF %d\n", - vf->vf_id); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - /* VF ADQ and DCF are mutually exclusive. */ - if (ice_is_dcf_enabled(pf)) { - dev_err(dev, "Device Control Functionality is currently enabled. VF ADQ cannot be enabled\n"); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - /* ADQ cannot be applied if spoof check is ON */ - if (vf->spoofchk) { - dev_err(dev, "Spoof check is ON, turn it OFF to enable ADQ\n"); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - ice_for_each_vsi(pf, i) { - if (!pf->vsi[i]) - ++available_vsis; - } - - if (available_vsis < tci->num_tc - 1) { - dev_err(dev, "Not enough VSIs left to enable ADQ on VF %d\n", - vf->vf_id); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - max_tc_allowed = ice_vc_get_max_chnl_tc_allowed(vf); - /* max number of traffic classes for VF currently capped at 4 for legacy - * ADQ and 16 for ADQ V2. - */ - if (!tci->num_tc || tci->num_tc > max_tc_allowed) { - dev_dbg(dev, "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n", - vf->vf_id, tci->num_tc, max_tc_allowed); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - /* validate queues for each TC */ - for (i = 0; i < tci->num_tc; i++) { - if (!tci->list[i].count) { - dev_err(dev, "VF %d: TC %d trying to set %u queues, should be > 0 per TC\n", - vf->vf_id, i, tci->list[i].count); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - total_qs += tci->list[i].count; - } - - if (total_qs > ICE_MAX_DFLT_QS_PER_VF) { - dev_err(dev, "VF %d: Total number of queues of all TCs cannot exceed %u\n", - vf->vf_id, ICE_MAX_DFLT_QS_PER_VF); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - /* Speed in Mbps */ - if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) - link_speed = ice_conv_link_speed_to_virtchnl(true, - ls->link_speed); - else - link_speed = ice_conv_virtchnl_speed_to_mbps(ls->link_speed); - - if (!link_speed) { - dev_err(dev, "Cannot detect link speed on VF %d\n", vf->vf_id); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - for (i = 0; i < tci->num_tc; i++) - if (tci->list[i].max_tx_rate) - total_max_rate += tci->list[i].max_tx_rate; - - if (total_max_rate > link_speed) { - dev_err(dev, "Invalid tx rate specified for ADQ on VF %d\n", - vf->vf_id); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - if (vf->max_tx_rate && total_max_rate > vf->max_tx_rate) { - dev_err(dev, "Invalid tx rate specified for ADQ on VF %d, total_max_rate %llu Mpbs > host set max_tx_rate %u Mbps\n", - vf->vf_id, total_max_rate, vf->max_tx_rate); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - /* need Max VF queues but already have default number of queues */ - adq_request_qps = ICE_MAX_DFLT_QS_PER_VF - pf->num_qps_per_vf; - - if (ice_get_avail_txq_count(pf) < adq_request_qps) { - dev_err(dev, "No queues left to allocate to VF %d\n", - vf->vf_id); - v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; - goto err; - } else { - /* we need to allocate max VF queues to enable ADQ so as to - * make sure ADQ enabled VF always gets back queues when it - * goes through a reset. 
- */ - vf->num_vf_qs = ICE_MAX_DFLT_QS_PER_VF; - } - - /* parse data from the queue channel info */ - vf->num_tc = tci->num_tc; - - for (i = 0; i < vf->num_tc; i++) { - if (tci->list[i].max_tx_rate) - vf->ch[i].max_tx_rate = tci->list[i].max_tx_rate; - - vf->ch[i].num_qps = tci->list[i].count; - vf->ch[i].offset = tci->list[i].offset; - } - - /* set this flag only after making sure all inputs are sane */ - vf->adq_enabled = true; - /* initialize filter enable flag, set it only if filters are applied */ - vf->adq_fltr_ena = false; - - /* reset the VF in order to allocate resources. Don't reset if ADQ_V2 - * capability is negotiated, since in that case AVF driver will request - * for a reset. - */ - if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2)) { - ice_vc_notify_vf_reset(vf); - ice_reset_vf(vf, false); - } - /* send the response to the VF */ -err: - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2) - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, - v_ret, (u8 *)tci, sizeof(*tci)); - else - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, - v_ret, NULL, 0); -} - -/** - * ice_vc_del_qch_msg - * @vf: pointer to the VF info - * @msg: pointer to the msg buffer - * - * delete the additional VSIs which are created as part of ADQ - */ -static int ice_vc_del_qch_msg(struct ice_vf *vf, u8 *msg) -{ - enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - struct device *dev; - u8 tc; - - dev = ice_pf_to_dev(pf); - - if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - /* VF ADQ and DCF are mutually exclusive. */ - if (ice_is_dcf_enabled(pf)) { - dev_err(dev, "Device Control Functionality is currently enabled. VF ADQ cannot be enabled\n"); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - if (ice_is_vf_adq_ena(vf)) { - /* if ADQ_V2 is set, perform inline cleanup of ADQ resources and - * return success and eventually VF driver will initiate reset - * as per design - */ - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2) { - dev_info(ice_pf_to_dev(pf), - "Deleting Queue Channels for ADQ on VF %d and ADQ_V2 is set\n", - vf->vf_id); - - /* release VF ADQ filters and VSIs inline */ - ice_vf_adq_release(vf); - v_ret = VIRTCHNL_STATUS_SUCCESS; - goto err; - } - -#ifdef HAVE_TC_SETUP_CLSFLOWER - /* delete all ADQ filters for given VF */ - ice_del_all_adv_switch_fltr(vf); -#endif /* HAVE_TC_SETUP_CLSFLOWER */ - - /* stop all Tx/Rx rings and clean them before deleting the ADQ - * resources, if not it will throw fail to set the LAN Tx queue - * context error. This is needed irrespective of ADQ_V2. Channel - * related TC starts at 1. Don't down the VSI and related - * resources for TC 0 because it is primary VF VSI and downing - * that VSI is handled somewhere else. - */ - for (tc = ICE_VF_CHNL_START_TC; tc < vf->num_tc; tc++) { - vsi = ice_get_vf_adq_vsi(vf, tc); - if (!vsi) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - if (vf->ch[tc].vsi_num) - ice_down(vsi); - } - - /* this order of code is very important, if num_tc is not - * cleared, VF again rebuilds as ADQ enabled clearly contrary - * to what we're trying to do. Also clearing num_tc before - * deleting ADQ filters leads to the condition where the code - * will try to delete filters when none are configured. 
- */ - vf->num_tc = 0; - dev_info(ice_pf_to_dev(pf), "Deleting Queue Channels for ADQ on VF %d\n", - vf->vf_id); - - /* reset needs to happen first, before we clear the adq_enabled - * flag, since freeing up of ADQ resources happens based off of - * this flag in reset path. Doing a reset after clearing the - * flag will leave the ADQ resources in zombie state which in - * turn creates undesired problems such as system lock up, stack - * trace etc., - * Also we shouldn't be doing a reset if ADQ flag is cleared in - * some other place, hence sending the failure response back to - * the VF. - */ - ice_vc_notify_vf_reset(vf); - ice_reset_vf(vf, false); - if (ice_is_vf_link_up(vf)) { - /* bring the VSI 0 back up again */ - vsi = ice_get_vf_adq_vsi(vf, 0); - if (!vsi) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - ice_up(vsi); - } - - vf->adq_enabled = false; - } else { - dev_info(dev, "VF %d trying to delete queue channels but ADQ isn't enabled\n", - vf->vf_id); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - } - - /* send the response to the VF */ -err: - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2) - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, - v_ret, msg, - sizeof(struct virtchnl_tc_info)); - else - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, - v_ret, NULL, 0); -} -#endif /* HAVE_TC_SETUP_CLSFLOWER */ - /** * ice_vc_set_rss_hena - set RSS HENA bits for the VF * @vf: pointer to the VF info @@ -7661,9 +4699,9 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg) struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct ice_pf *pf = vf->pf; - enum ice_status status; struct ice_vsi *vsi; struct device *dev; + int status; dev = ice_pf_to_dev(pf); @@ -7712,7 +4750,7 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg) /* send the response to the VF */ err: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, v_ret, - NULL, 0); + NULL, 0); } /** @@ -7725,68 +4763,33 @@ err: */ static int ice_vc_rdma_msg(struct ice_vf *vf, u8 *msg, u16 len) { - struct ice_peer_obj *rdma_peer; - int ret; + struct iidc_core_dev_info *rcdi; + struct iidc_auxiliary_drv *iadrv; + int ret = -ENODEV; - rdma_peer = vf->pf->rdma_peer; - if (!rdma_peer) { + rcdi = ice_find_cdev_info_by_id(vf->pf, IIDC_RDMA_ID); + if (!rcdi) { pr_err("Invalid RDMA peer attempted to send message to peer\n"); return -EIO; } - if (!rdma_peer->peer_ops || !rdma_peer->peer_ops->vc_receive) { - pr_err("Incomplete RMDA peer attempting to send msg\n"); - return -EINVAL; - } + mutex_lock(&vf->pf->adev_mutex); + device_lock(&rcdi->adev->dev); + iadrv = ice_get_auxiliary_drv(rcdi); + if (iadrv && iadrv->vc_receive) { + u16 vf_abs_id = ice_abs_vf_id(&vf->pf->hw, vf->vf_id); - ret = rdma_peer->peer_ops->vc_receive(rdma_peer, vf->vf_id, msg, len); + ret = iadrv->vc_receive(rcdi, vf_abs_id, msg, len); + } + device_unlock(&rcdi->adev->dev); + mutex_unlock(&vf->pf->adev_mutex); if (ret) - pr_err("Failed to send message to RDMA peer, error %d\n", ret); + ice_dev_err_errno(ice_pf_to_dev(vf->pf), ret, + "Failed to send message to RDMA peer"); return ret; } -/** - * ice_vf_cfg_rdma_ceq_irq_map - configure the CEQ IRQ mapping - * @vf: VF structure associated to the VF that requested the mapping - * @qv_info: RDMA queue vector mapping information - * - * Configure the CEQ index for the passed in VF. 
This will result in the CEQ - * being able to generate interrupts - */ -static void -ice_vf_cfg_rdma_ceq_irq_map(struct ice_vf *vf, - struct virtchnl_rdma_qv_info *qv_info) -{ - u16 glint_ceqctl_idx = ice_vf_get_glint_ceqctl_idx(vf, - qv_info->ceq_idx); - - u32 regval = (qv_info->v_idx & GLINT_CEQCTL_MSIX_INDX_M) | - ((qv_info->itr_idx << GLINT_CEQCTL_ITR_INDX_S) & - GLINT_CEQCTL_ITR_INDX_M) | GLINT_CEQCTL_CAUSE_ENA_M; - - wr32(&vf->pf->hw, GLINT_CEQCTL(glint_ceqctl_idx), regval); -} - -/** - * ice_vf_cfg_rdma_aeq_irq_map - configure the AEQ IRQ mapping - * @vf: VF structure associated to the VF that requested the mapping - * @qv_info: RDMA queue vector mapping information - * - * Configure the AEQ for the passed in VF. This will result in the AEQ being - * able to generate interrupts - */ -static void -ice_vf_cfg_rdma_aeq_irq_map(struct ice_vf *vf, - struct virtchnl_rdma_qv_info *qv_info) -{ - u32 regval = (qv_info->v_idx & PFINT_AEQCTL_MSIX_INDX_M) | - ((qv_info->itr_idx << VPINT_AEQCTL_ITR_INDX_S) & - VPINT_AEQCTL_ITR_INDX_M) | VPINT_AEQCTL_CAUSE_ENA_M; - - wr32(&vf->pf->hw, VPINT_AEQCTL(vf->vf_id), regval); -} - /** * ice_vc_cfg_rdma_irq_map_msg - MSIX mapping of RDMA control queue interrupts * @vf: VF structure associated to the VF that requested the mapping @@ -7802,6 +4805,7 @@ static int ice_vc_cfg_rdma_irq_map_msg(struct ice_vf *vf, u8 *msg) enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_rdma_qvlist_info *qvlist = (struct virtchnl_rdma_qvlist_info *)msg; + const struct ice_vf_ops *ops = vf->vf_ops; u16 num_msix_per_vf; u32 i; @@ -7810,7 +4814,7 @@ static int ice_vc_cfg_rdma_irq_map_msg(struct ice_vf *vf, u8 *msg) goto err; } - num_msix_per_vf = vf->pf->num_msix_per_vf; + num_msix_per_vf = vf->pf->vfs.num_msix_per; if (qvlist->num_vectors > num_msix_per_vf) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; @@ -7846,11 +4850,7 @@ static int ice_vc_cfg_rdma_irq_map_msg(struct ice_vf *vf, u8 *msg) for (i = 0; i < qvlist->num_vectors; i++) { struct virtchnl_rdma_qv_info *qv_info = &qvlist->qv_info[i]; - if (qv_info->ceq_idx != VIRTCHNL_RDMA_INVALID_QUEUE_IDX) - ice_vf_cfg_rdma_ceq_irq_map(vf, qv_info); - - if (qv_info->aeq_idx != VIRTCHNL_RDMA_INVALID_QUEUE_IDX) - ice_vf_cfg_rdma_aeq_irq_map(vf, qv_info); + ops->cfg_rdma_irq_map(vf, qv_info); } err: @@ -7869,13 +4869,14 @@ err: static int ice_vc_clear_rdma_irq_map(struct ice_vf *vf) { enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + const struct ice_vf_ops *ops = vf->vf_ops; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; } - ice_vf_clear_rdma_irq_map(vf); + ops->clear_rdma_irq_map(vf); err: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP, @@ -7995,8 +4996,8 @@ static int ice_validate_tpid(u16 tpid) */ static int ice_vc_dcf_vlan_offload_msg(struct ice_vf *vf, u8 *msg) { - struct virtchnl_dcf_vlan_offload *offload = (struct virtchnl_dcf_vlan_offload *)msg; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_dcf_vlan_offload *offload; struct ice_dcf_vlan_info *dcf_vlan; struct ice_pf *pf = vf->pf; struct ice_vsi *target_vsi; @@ -8006,7 +5007,10 @@ static int ice_vc_dcf_vlan_offload_msg(struct ice_vf *vf, u8 *msg) u16 vlan_flags; u16 vlan_type; - if (!ice_is_dvm_ena(&pf->hw) || !(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) || + offload = (struct virtchnl_dcf_vlan_offload *)msg; + + if (!ice_is_dvm_ena(&pf->hw) || + !(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) || !ice_is_vf_dcf(vf) || 
ice_dcf_get_state(pf) != ICE_DCF_STATE_ON) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; @@ -8020,28 +5024,35 @@ static int ice_vc_dcf_vlan_offload_msg(struct ice_vf *vf, u8 *msg) vlan_type = (vlan_flags & VIRTCHNL_DCF_VLAN_TYPE_M) >> VIRTCHNL_DCF_VLAN_TYPE_S; - if (ice_validate_vf_id(pf, offload->vf_id) || ice_validate_tpid(offload->tpid) || - (!insert_mode && !strip_mode) || vlan_type != VIRTCHNL_DCF_VLAN_TYPE_OUTER || + if (ice_validate_tpid(offload->tpid) || + (!insert_mode && !strip_mode) || + vlan_type != VIRTCHNL_DCF_VLAN_TYPE_OUTER || offload->vlan_id >= VLAN_N_VID) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; } - target_vf = &pf->vf[offload->vf_id]; + target_vf = ice_get_vf_by_id(pf, offload->vf_id); + if (!target_vf) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (ice_check_vf_ready_for_cfg(target_vf)) { v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; - goto err; + goto err_put_target_vf; } target_vsi = ice_get_vf_vsi(target_vf); if (!target_vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; + goto err_put_target_vf; } - if (ice_vf_is_port_vlan_ena(target_vf) || ice_vsi_has_non_zero_vlans(target_vsi)) { + if (ice_vf_is_port_vlan_ena(target_vf) || + ice_vsi_has_non_zero_vlans(target_vsi)) { v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; - goto err; + goto err_put_target_vf; } dcf_vlan = &target_vf->dcf_vlan_info; @@ -8054,7 +5065,7 @@ static int ice_vc_dcf_vlan_offload_msg(struct ice_vf *vf, u8 *msg) } else if (insert_mode == VIRTCHNL_DCF_VLAN_INSERT_PORT_BASED) { if (strip_mode) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; + goto err_put_target_vf; } if (dcf_vlan->outer_port_vlan.tpid != offload->tpid || @@ -8067,7 +5078,7 @@ static int ice_vc_dcf_vlan_offload_msg(struct ice_vf *vf, u8 *msg) } } else if (insert_mode) { v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; - goto err; + goto err_put_target_vf; } if (strip_mode == VIRTCHNL_DCF_VLAN_STRIP_DISABLE) { @@ -8080,7 +5091,7 @@ static int ice_vc_dcf_vlan_offload_msg(struct ice_vf *vf, u8 *msg) !dcf_vlan->outer_stripping_ena) { if (ice_vsi_is_rxq_crc_strip_dis(target_vsi)) { v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; - goto err; + goto err_put_target_vf; } dcf_vlan->outer_stripping_tpid = offload->tpid; dcf_vlan->outer_stripping_ena = 1; @@ -8088,12 +5099,15 @@ static int ice_vc_dcf_vlan_offload_msg(struct ice_vf *vf, u8 *msg) } } else if (strip_mode) { v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; - goto err; + goto err_put_target_vf; } if (dcf_vlan->applying) - ice_vc_reset_vf(target_vf); + ice_reset_vf(target_vf, + ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK); +err_put_target_vf: + ice_put_vf(target_vf); err: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DCF_VLAN_OFFLOAD, v_ret, NULL, 0); @@ -8120,9 +5134,9 @@ ice_dcf_handle_aq_cmd(struct ice_vf *vf, struct ice_aq_desc *aq_desc, enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct ice_pf *pf = vf->pf; enum virtchnl_ops v_op; - enum ice_status aq_ret; u16 v_msg_len = 0; u8 *v_msg = NULL; + int aq_ret; int ret; pf->dcf.aq_desc_received = false; @@ -8151,17 +5165,17 @@ ice_dcf_handle_aq_cmd(struct ice_vf *vf, struct ice_aq_desc *aq_desc, } aq_ret = ice_aq_send_cmd(&pf->hw, aq_desc, aq_buf, aq_buf_size, NULL); - /* It needs to send back the AQ response message if ICE_ERR_AQ_ERROR - * returns, some AdminQ handlers will use the error code filled by FW - * to do exception handling. + /* It needs to send back the AQ response message if -EIO returns, some + * AdminQ handlers will use the error code filled by FW to do exception + * handling. 
*/ - if (aq_ret && aq_ret != ICE_ERR_AQ_ERROR) { + if (aq_ret && aq_ret != -EIO) { v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; v_op = VIRTCHNL_OP_DCF_CMD_DESC; goto err; } - if (aq_ret != ICE_ERR_AQ_ERROR) { + if (aq_ret != -EIO) { v_ret = ice_dcf_post_aq_send_cmd(pf, aq_desc, aq_buf); if (v_ret != VIRTCHNL_STATUS_SUCCESS) { v_op = VIRTCHNL_OP_DCF_CMD_DESC; @@ -8197,6 +5211,30 @@ err: return ice_vc_send_msg_to_vf(vf, v_op, v_ret, v_msg, v_msg_len); } +/** + * ice_dcf_pre_handle_desc - Pre-handle the DCF AdminQ command descriptor + * @vf: pointer to the VF info + * @aq_desc: the AdminQ command descriptor + * + * Pre-handle the DCF AdminQ command descriptor before sending it to the + * firmware. Since DCF does not have some necessary information for specific + * AdminQ commands, PF needs to complete the descriptor. + */ +static void +ice_dcf_pre_handle_desc(struct ice_vf *vf, struct ice_aq_desc *aq_desc) +{ + struct ice_hw *hw = &vf->pf->hw; + + switch (le16_to_cpu(aq_desc->opcode)) { + case ice_aqc_opc_query_port_ets: + aq_desc->params.port_ets.port_teid = + hw->port_info->root->info.node_teid; + break; + default: + break; + } +} + /** * ice_vc_dcf_cmd_desc_msg - handle the DCF AdminQ command descriptor * @vf: pointer to the VF info @@ -8220,6 +5258,9 @@ static int ice_vc_dcf_cmd_desc_msg(struct ice_vf *vf, u8 *msg, u16 len) goto err; } + /* Pre-handle the descriptor for specific DCF AdminQ commands */ + ice_dcf_pre_handle_desc(vf, aq_desc); + /* The AdminQ descriptor needs to be stored for use when the followed * VIRTCHNL_OP_DCF_CMD_BUFF is received. */ @@ -8261,28 +5302,6 @@ err: VIRTCHNL_STATUS_ERR_PARAM, NULL, 0); } -static int ice_vc_flush_dcf_rule(struct ice_vf *vf) -{ - enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - struct ice_pf *pf = vf->pf; - - if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - if (!ice_is_vf_dcf(vf)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto err; - } - - ice_rm_all_dcf_sw_rules(pf); - -err: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DCF_RULE_FLUSH, - v_ret, NULL, 0); -} - /** * ice_vc_dis_dcf_cap - disable DCF capability for the VF * @vf: pointer to the VF @@ -8302,9 +5321,6 @@ static int ice_vc_dis_dcf_cap(struct ice_vf *vf) } if (vf->driver_caps & VIRTCHNL_VF_CAP_DCF) { -#ifndef HAVE_NDO_SET_VF_TRUST - ice_set_vf_trust(ice_get_main_vsi(vf->pf)->netdev, vf->vf_id, false); -#endif /* !HAVE_NOD_SET_VF_TRUST */ vf->driver_caps &= ~VIRTCHNL_VF_CAP_DCF; ice_rm_all_dcf_sw_rules(vf->pf); ice_clear_dcf_acl_cfg(vf->pf); @@ -8328,8 +5344,10 @@ static int ice_vc_dcf_get_vsi_map(struct ice_vf *vf) struct virtchnl_dcf_vsi_map *vsi_map = NULL; struct ice_pf *pf = vf->pf; struct ice_vsi *pf_vsi; + struct ice_vf *tmp_vf; + unsigned int bkt; u16 len = 0; - int vf_id; + u16 num_vfs; int ret; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { @@ -8342,7 +5360,9 @@ static int ice_vc_dcf_get_vsi_map(struct ice_vf *vf) goto err; } - len = struct_size(vsi_map, vf_vsi, pf->num_alloc_vfs - 1); + num_vfs = ice_get_num_vfs(pf); + + len = struct_size(vsi_map, vf_vsi, num_vfs - 1); vsi_map = kzalloc(len, GFP_KERNEL); if (!vsi_map) { v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; @@ -8358,16 +5378,16 @@ static int ice_vc_dcf_get_vsi_map(struct ice_vf *vf) } vsi_map->pf_vsi = pf_vsi->vsi_num; - vsi_map->num_vfs = pf->num_alloc_vfs; - - ice_for_each_vf(pf, vf_id) { - struct ice_vf *tmp_vf = &pf->vf[vf_id]; + vsi_map->num_vfs = num_vfs; + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, tmp_vf) { if 
(!ice_is_vf_disabled(tmp_vf) && test_bit(ICE_VF_STATE_INIT, tmp_vf->vf_states)) - vsi_map->vf_vsi[vf_id] = tmp_vf->lan_vsi_num | + vsi_map->vf_vsi[tmp_vf->vf_id] = tmp_vf->lan_vsi_num | VIRTCHNL_DCF_VF_VSI_VALID; } + mutex_unlock(&pf->vfs.table_lock); err: ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DCF_GET_VSI_MAP, v_ret, @@ -8426,7 +5446,325 @@ err: } /** - * ice_vc_get_max_rss_qregion - message handling for VIRTCHNL_OP_GET_MAX_RSS_QREGION + * ice_dcf_cfg_vf_tc_bw_lmt - Configure VF bandwidth per TC + * @vf: pointer to the VF info + * @cfg_list: pointer to the VF TC bandwidth configuration + * + * Configure the bandwidth for VF VSI per enabled TC. If + * bandwidth is zero, default configuration is applied. + */ +static int +ice_dcf_cfg_vf_tc_bw_lmt(struct ice_vf *vf, + struct virtchnl_dcf_bw_cfg_list *cfg_list) +{ + struct ice_port_info *pi = vf->pf->hw.port_info; + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + struct virtchnl_dcf_bw_cfg *cfg; + u32 committed_rate; + u32 peak_rate; + int i, ret; + + if (!vsi) + return -EINVAL; + + for (i = 0; i < cfg_list->num_elem; i++) { + cfg = &cfg_list->cfg[i]; + peak_rate = cfg->shaper.peak; + committed_rate = cfg->shaper.committed; + + if (cfg->bw_type & VIRTCHNL_DCF_BW_PIR) { + if (peak_rate) { + ret = + ice_cfg_vsi_bw_lmt_per_tc(pi, vsi->idx, + cfg->tc_num, + ICE_MAX_BW, + peak_rate); + if (ret) + return ret; + } else { + /* If max bandwidth is zero, use default + * config + */ + ret = + ice_cfg_vsi_bw_dflt_lmt_per_tc(pi, + vsi->idx, + cfg->tc_num, + ICE_MAX_BW); + if (ret) + return ret; + } + } + + if (cfg->bw_type & VIRTCHNL_DCF_BW_CIR) { + if (committed_rate) { + ret = + ice_cfg_vsi_bw_lmt_per_tc(pi, vsi->idx, + cfg->tc_num, + ICE_MIN_BW, + committed_rate); + if (ret) + return ret; + } else { + /* If min bandwidth is zero, use default + * config + */ + ret = + ice_cfg_vsi_bw_dflt_lmt_per_tc(pi, + vsi->idx, + cfg->tc_num, + ICE_MIN_BW); + if (ret) + return ret; + } + } + } + + return 0; +} + +/** + * ice_dcf_tc_total_peak - Calculate the total max bandwidth of all TCs + * @cfg_list: pointer to the VF TC bandwidth configuration + */ +static u32 ice_dcf_tc_total_peak(struct virtchnl_dcf_bw_cfg_list *cfg_list) +{ + u32 total_peak = 0; + int i; + + for (i = 0; i < cfg_list->num_elem; i++) + total_peak += cfg_list->cfg[i].shaper.peak; + + return total_peak; +} + +/** + * ice_dcf_cfg_tc_node_bw_lmt - Configure TC node bandwidth + * @pf: pointer to the PF info + * @cfg_list: pointer to the VF TC bandwidth configuration + * + * Configure the bandwidth for enabled TC nodes. If bandwidth is zero, + * default configuration is applied. + */ +static int +ice_dcf_cfg_tc_node_bw_lmt(struct ice_pf *pf, + struct virtchnl_dcf_bw_cfg_list *cfg_list) +{ + struct ice_port_info *pi = pf->hw.port_info; + struct device *dev = ice_pf_to_dev(pf); + struct virtchnl_dcf_bw_cfg *cfg; + u32 peak_rate; + int i, ret; + + for (i = 0; i < cfg_list->num_elem; i++) { + cfg = &cfg_list->cfg[i]; + peak_rate = cfg->shaper.peak; + + if (pi->qos_cfg.local_dcbx_cfg.etscfg.tsatable[cfg->tc_num] != + VIRTCHNL_ABITER_STRICT) { + dev_dbg(dev, "TC %u: TC node max bandwidth can only be configured in Strict Priority mode\n", + cfg->tc_num); + continue; + } + + /* Since TC node CIR configuring is not supported, only + * configure PIR to guarantee max and min bandwidth of each TC. + * Because PIR and CIR can be both configured in below, any of + * the bw_type is allowed. 
+ */ + if (cfg->bw_type & + (VIRTCHNL_DCF_BW_PIR | VIRTCHNL_DCF_BW_CIR)) { + if (peak_rate) { + ret = ice_cfg_tc_node_bw_lmt(pi, + cfg->tc_num, + ICE_MAX_BW, + peak_rate); + if (ret) + return ret; + } else { + /* If max bandwidth is zero, use default config + * (no rate limit) + */ + ret = ice_cfg_tc_node_bw_dflt_lmt(pi, + cfg->tc_num, + ICE_MAX_BW); + if (ret) + return ret; + } + } + } + + return 0; +} + +/** + * ice_dcf_validate_bw - Validate bandwidth for TC and VF VSI + * @pf: pointer to the PF info + * @vf: pointer to the VF info + * @cfg_list: pointer to the VF TC bandwidth configuration + * + * Validate the min and max bandwidth for TC and VF VSI in advance before + * configuring. + */ +static int +ice_dcf_validate_bw(struct ice_pf *pf, struct ice_vf *vf, + struct virtchnl_dcf_bw_cfg_list *cfg_list) +{ + struct device *dev = ice_pf_to_dev(pf); + struct virtchnl_dcf_bw_cfg *cfg; + u32 committed_rate, peak_rate; + bool lowest_cir_mark = false; + struct ice_vsi *vsi; + u32 total_peak = 0; + int i, speed; + + if (cfg_list->node_type == VIRTCHNL_DCF_TARGET_TC_BW) { + total_peak = ice_dcf_tc_total_peak(cfg_list); + vsi = ice_get_main_vsi(pf); + } else { + vsi = ice_get_vf_vsi(vf); + } + + if (!vsi) + return -EINVAL; + + speed = ice_get_link_speed_kbps(vsi); + + for (i = 0; i < cfg_list->num_elem; i++) { + cfg = &cfg_list->cfg[i]; + peak_rate = cfg->shaper.peak; + committed_rate = cfg->shaper.committed; + + if (!(BIT(cfg->tc_num) & vsi->tc_cfg.ena_tc)) { + dev_err(dev, "TC %u: TC is not enabled\n", + cfg->tc_num); + return -EINVAL; + } + + if (cfg_list->node_type == VIRTCHNL_DCF_TARGET_TC_BW) { + u32 rest_peak = total_peak - peak_rate; + /* For TC larger than the lowest TC with none-zero min + * bandwidth, max bandwidth must be set. + */ + if (lowest_cir_mark && peak_rate == 0) { + dev_err(dev, "TC %u: Max bandwidth must be configured\n", + cfg->tc_num); + return -EINVAL; + } + + if (!lowest_cir_mark && committed_rate) + lowest_cir_mark = true; + + if (committed_rate && + committed_rate + rest_peak > (u32)speed) { + dev_err(dev, "TC %u: Min bandwidth plus other TCs' max bandwidth %uKbps exceeds port link speed %uKbps\n", + cfg->tc_num, + committed_rate + rest_peak, speed); + return -EINVAL; + } + } + + /* If min bandwidth is 0, use default setting. If not 0, min + * bandwidth should be larger than 500Kbps. 
+ */ + if (committed_rate && committed_rate < ICE_SCHED_MIN_BW) { + dev_err(dev, "TC %u: If min Tx bandwidth is set for %s %d, it cannot be less than 500Kbps\n", + cfg->tc_num, + ice_vsi_type_str(vsi->type), + vsi->idx); + return -EINVAL; + } + + if (peak_rate && committed_rate > peak_rate) { + dev_err(dev, "TC %u: Cannot set min Tx bandwidth greater than max Tx bandwidth for %s %d\n", + cfg->tc_num, + ice_vsi_type_str(vsi->type), + vsi->idx); + return -EINVAL; + } + + if (peak_rate > (u32)speed) { + dev_err(dev, "TC %u: Invalid max Tx bandwidth %uKbps specified for %s %d is greater than current link speed %uKbps\n", + cfg->tc_num, peak_rate, + ice_vsi_type_str(vsi->type), + vsi->idx, speed); + return -EINVAL; + } + + if (committed_rate > (u32)speed) { + dev_err(dev, "TC %u: Invalid min Tx bandwidth %uKbps specified for %s %d is greater than current link speed %uKbps\n", + cfg->tc_num, committed_rate, + ice_vsi_type_str(vsi->type), + vsi->idx, speed); + return -EINVAL; + } + } + + return 0; +} + +/** + * ice_vc_dcf_config_tc - Configure VF and TC bandwidth + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer which holds the command buffer + * + * Configure Tx scheduler node's bandwidth per enabled TC + * for assigned VF, as well as TC nodes. + */ +static int ice_vc_dcf_config_tc(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_dcf_bw_cfg_list *bwcfg_list; + struct ice_pf *pf = vf->pf; + struct ice_vf *target_vf; + + bwcfg_list = (struct virtchnl_dcf_bw_cfg_list *)msg; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (!ice_is_vf_dcf(vf) || ice_dcf_get_state(pf) != ICE_DCF_STATE_ON) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (ice_dcf_validate_bw(pf, vf, bwcfg_list)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (bwcfg_list->node_type == VIRTCHNL_DCF_TARGET_TC_BW) { + if (ice_dcf_cfg_tc_node_bw_lmt(pf, bwcfg_list)) { + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + goto err; + } + } else { + target_vf = ice_get_vf_by_id(pf, bwcfg_list->vf_id); + if (!target_vf) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (ice_check_vf_ready_for_cfg(target_vf)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + ice_put_vf(target_vf); + goto err; + } + + if (ice_dcf_cfg_vf_tc_bw_lmt(target_vf, bwcfg_list)) + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + + ice_put_vf(target_vf); + } + +err: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DCF_CONFIG_BW, v_ret, + NULL, 0); +} + +/** + * ice_vc_get_max_rss_qregion - handler for VIRTCHNL_OP_GET_MAX_RSS_QREGION * @vf: source of the request */ static int ice_vc_get_max_rss_qregion(struct ice_vf *vf) @@ -8469,21 +5807,25 @@ error_param: static bool ice_vc_supported_queue_type(s32 queue_type) { - return (queue_type == VIRTCHNL_QUEUE_TYPE_RX || queue_type == VIRTCHNL_QUEUE_TYPE_TX); + return (queue_type == VIRTCHNL_QUEUE_TYPE_RX || + queue_type == VIRTCHNL_QUEUE_TYPE_TX); } /** - * ice_vc_validate_qs_v2_msg - validate all parameters sent in the qs_msg structure + * ice_vc_validate_qs_v2_msg - validate all qs_msg parameters * @vf: VF the message was received from * @qs_msg: contents of the message from the VF * - * Used to validate both the VIRTCHNL_OP_ENABLE_QUEUES_V2 and VIRTCHNL_OP_DISABLE_QUEUES_V2 - * messages. This should always be called before attempting to enable and/or disable queues on - * behalf of a VF in response to the preivously mentioned opcodes. 
If all checks succeed, then - * return success indicating to the caller that the qs_msg is valid. Otherwise return false, - * indicating to the caller that the qs_msg is invalid. + * Used to validate both the VIRTCHNL_OP_ENABLE_QUEUES_V2 and + * VIRTCHNL_OP_DISABLE_QUEUES_V2 messages. This should always be called before + * attempting to enable and/or disable queues on behalf of a VF in response to + * the preivously mentioned opcodes. If all checks succeed, then return + * success indicating to the caller that the qs_msg is valid. Otherwise return + * false, indicating to the caller that the qs_msg is invalid. */ -static bool ice_vc_validate_qs_v2_msg(struct ice_vf *vf, struct virtchnl_del_ena_dis_queues *qs_msg) +static bool +ice_vc_validate_qs_v2_msg(struct ice_vf *vf, + struct virtchnl_del_ena_dis_queues *qs_msg) { struct virtchnl_queue_chunks *chunks = &qs_msg->chunks; int i; @@ -8495,13 +5837,17 @@ static bool ice_vc_validate_qs_v2_msg(struct ice_vf *vf, struct virtchnl_del_ena return false; for (i = 0; i < chunks->num_chunks; i++) { + u16 max_queue_in_chunk; + if (!ice_vc_supported_queue_type(chunks->chunks[i].type)) return false; if (!chunks->chunks[i].num_queues) return false; - if (chunks->chunks[i].start_queue_id + chunks->chunks[i].num_queues > vf->num_vf_qs) + max_queue_in_chunk = chunks->chunks[i].start_queue_id + + chunks->chunks[i].num_queues; + if (max_queue_in_chunk > vf->num_vf_qs) return false; } @@ -8513,7 +5859,8 @@ static bool ice_vc_validate_qs_v2_msg(struct ice_vf *vf, struct virtchnl_del_ena (q_id) < (chunk)->start_queue_id + (chunk)->num_queues; \ (q_id)++) -static int ice_vc_ena_rxq_chunk(struct ice_vf *vf, struct virtchnl_queue_chunk *chunk) +static int +ice_vc_ena_rxq_chunk(struct ice_vf *vf, struct virtchnl_queue_chunk *chunk) { struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; int q_id; @@ -8531,7 +5878,8 @@ static int ice_vc_ena_rxq_chunk(struct ice_vf *vf, struct virtchnl_queue_chunk * return 0; } -static int ice_vc_ena_txq_chunk(struct ice_vf *vf, struct virtchnl_queue_chunk *chunk) +static int +ice_vc_ena_txq_chunk(struct ice_vf *vf, struct virtchnl_queue_chunk *chunk) { struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; int q_id; @@ -8552,11 +5900,14 @@ static int ice_vc_ena_txq_chunk(struct ice_vf *vf, struct virtchnl_queue_chunk * */ static int ice_vc_ena_qs_v2_msg(struct ice_vf *vf, u8 *msg) { - struct virtchnl_del_ena_dis_queues *ena_qs_msg = (struct virtchnl_del_ena_dis_queues *)msg; - struct virtchnl_queue_chunks *chunks = &ena_qs_msg->chunks; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_del_ena_dis_queues *ena_qs_msg; + struct virtchnl_queue_chunks *chunks; int i; + ena_qs_msg = (struct virtchnl_del_ena_dis_queues *)msg; + chunks = &ena_qs_msg->chunks; + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -8575,9 +5926,11 @@ static int ice_vc_ena_qs_v2_msg(struct ice_vf *vf, u8 *msg) for (i = 0; i < chunks->num_chunks; i++) { struct virtchnl_queue_chunk *chunk = &chunks->chunks[i]; - if (chunk->type == VIRTCHNL_QUEUE_TYPE_RX && ice_vc_ena_rxq_chunk(vf, chunk)) + if (chunk->type == VIRTCHNL_QUEUE_TYPE_RX && + ice_vc_ena_rxq_chunk(vf, chunk)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; - else if (chunk->type == VIRTCHNL_QUEUE_TYPE_TX && ice_vc_ena_txq_chunk(vf, chunk)) + else if (chunk->type == VIRTCHNL_QUEUE_TYPE_TX && + ice_vc_ena_txq_chunk(vf, chunk)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; if (v_ret != VIRTCHNL_STATUS_SUCCESS) @@ -8587,10 +5940,12 @@ static int 
ice_vc_ena_qs_v2_msg(struct ice_vf *vf, u8 *msg) set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); error_param: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES_V2, v_ret, NULL, 0); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES_V2, + v_ret, NULL, 0); } -static int ice_vc_dis_rxq_chunk(struct ice_vf *vf, struct virtchnl_queue_chunk *chunk) +static int +ice_vc_dis_rxq_chunk(struct ice_vf *vf, struct virtchnl_queue_chunk *chunk) { struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; u16 q_id; @@ -8609,7 +5964,8 @@ static int ice_vc_dis_rxq_chunk(struct ice_vf *vf, struct virtchnl_queue_chunk * return 0; } -static int ice_vc_dis_txq_chunk(struct ice_vf *vf, struct virtchnl_queue_chunk *chunk) +static int +ice_vc_dis_txq_chunk(struct ice_vf *vf, struct virtchnl_queue_chunk *chunk) { struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; u16 q_id; @@ -8635,11 +5991,14 @@ static int ice_vc_dis_txq_chunk(struct ice_vf *vf, struct virtchnl_queue_chunk * */ static int ice_vc_dis_qs_v2_msg(struct ice_vf *vf, u8 *msg) { - struct virtchnl_del_ena_dis_queues *dis_qs_msg = (struct virtchnl_del_ena_dis_queues *)msg; - struct virtchnl_queue_chunks *chunks = &dis_qs_msg->chunks; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_del_ena_dis_queues *dis_qs_msg; + struct virtchnl_queue_chunks *chunks; int i; + dis_qs_msg = (struct virtchnl_del_ena_dis_queues *)msg; + chunks = &dis_qs_msg->chunks; + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -8658,9 +6017,11 @@ static int ice_vc_dis_qs_v2_msg(struct ice_vf *vf, u8 *msg) for (i = 0; i < chunks->num_chunks; i++) { struct virtchnl_queue_chunk *chunk = &chunks->chunks[i]; - if (chunk->type == VIRTCHNL_QUEUE_TYPE_RX && ice_vc_dis_rxq_chunk(vf, chunk)) + if (chunk->type == VIRTCHNL_QUEUE_TYPE_RX && + ice_vc_dis_rxq_chunk(vf, chunk)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; - else if (chunk->type == VIRTCHNL_QUEUE_TYPE_TX && ice_vc_dis_txq_chunk(vf, chunk)) + else if (chunk->type == VIRTCHNL_QUEUE_TYPE_TX && + ice_vc_dis_txq_chunk(vf, chunk)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; if (v_ret != VIRTCHNL_STATUS_SUCCESS) @@ -8671,7 +6032,8 @@ static int ice_vc_dis_qs_v2_msg(struct ice_vf *vf, u8 *msg) clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); error_param: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES_V2, v_ret, NULL, 0); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES_V2, + v_ret, NULL, 0); } /** @@ -8679,12 +6041,15 @@ error_param: * @vf: VF the message was received from * @qv_maps: contents of the message from the VF * - * Used to validate VIRTCHNL_OP_MAP_VECTOR messages. This should always be called before attempting - * map interrupts to queues. If all checks succeed, then return success indicating to the caller - * that the qv_maps are valid. Otherwise return false, indicating to the caller that the qv_maps - * are invalid. + * Used to validate VIRTCHNL_OP_MAP_VECTOR messages. This should always be + * called before attempting map interrupts to queues. If all checks succeed, + * then return success indicating to the caller that the qv_maps are valid. + * Otherwise return false, indicating to the caller that the qv_maps are + * invalid. 
*/ -static bool ice_vc_validate_qv_maps(struct ice_vf *vf, struct virtchnl_queue_vector_maps *qv_maps) +static bool +ice_vc_validate_qv_maps(struct ice_vf *vf, + struct virtchnl_queue_vector_maps *qv_maps) { struct ice_vsi *vsi; int i; @@ -8717,11 +6082,13 @@ static bool ice_vc_validate_qv_maps(struct ice_vf *vf, struct virtchnl_queue_vec */ static int ice_vc_map_q_vector_msg(struct ice_vf *vf, u8 *msg) { - struct virtchnl_queue_vector_maps *qv_maps = (struct virtchnl_queue_vector_maps *)msg; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_queue_vector_maps *qv_maps; struct ice_vsi *vsi; int i; + qv_maps = (struct virtchnl_queue_vector_maps *)msg; + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -8745,17 +6112,31 @@ static int ice_vc_map_q_vector_msg(struct ice_vf *vf, u8 *msg) for (i = 0; i < qv_maps->num_qv_maps; i++) { struct virtchnl_queue_vector *qv_map = &qv_maps->qv_maps[i]; + struct ice_q_vector *q_vector; + u16 msix_id; + + q_vector = vf->vf_ops->get_q_vector(vf, vsi, + qv_map->vector_id); + if (!q_vector) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + msix_id = q_vector->v_idx + vsi->base_vector; if (qv_map->queue_type == VIRTCHNL_QUEUE_TYPE_RX) - ice_cfg_rxq_interrupt(vsi, qv_map->queue_id, qv_map->vector_id, + ice_cfg_rxq_interrupt(vsi, qv_map->queue_id, + msix_id, qv_map->itr_idx); else if (qv_map->queue_type == VIRTCHNL_QUEUE_TYPE_TX) - ice_cfg_txq_interrupt(vsi, qv_map->queue_id, qv_map->vector_id, + ice_cfg_txq_interrupt(vsi, qv_map->queue_id, + msix_id, qv_map->itr_idx); } error_param: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_MAP_QUEUE_VECTOR, v_ret, NULL, 0); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_MAP_QUEUE_VECTOR, + v_ret, NULL, 0); } static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf) @@ -8767,8 +6148,11 @@ static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf) } /** - * ice_vf_outer_vlan_not_allowed - check outer VLAN can be used when the device is in DVM + * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used * @vf: VF that being checked for + * + * When the device is in double VLAN mode, check whether or not the outer VLAN + * is allowed. 
*/ static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf) { @@ -9050,9 +6434,11 @@ ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc, return false; if ((ice_vc_is_valid_vlan(outer) && - !ice_vc_validate_vlan_tpid(filtering_support->outer, outer->tpid)) || + !ice_vc_validate_vlan_tpid(filtering_support->outer, + outer->tpid)) || (ice_vc_is_valid_vlan(inner) && - !ice_vc_validate_vlan_tpid(filtering_support->inner, inner->tpid))) + !ice_vc_validate_vlan_tpid(filtering_support->inner, + inner->tpid))) return false; } @@ -9123,7 +6509,7 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi, return err; if (vlan_promisc) - ice_vf_dis_vlan_promisc(vsi, &vlan); + ice_vf_dis_vlan_promisc(vf, vsi, &vlan); } vc_vlan = &vlan_fltr->inner; @@ -9140,7 +6526,7 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi, * we are in Single VLAN Mode (SVM) */ if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) - ice_vf_dis_vlan_promisc(vsi, &vlan); + ice_vf_dis_vlan_promisc(vf, vsi, &vlan); } } @@ -9213,7 +6599,7 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi, return err; if (vlan_promisc) { - err = ice_vf_ena_vlan_promisc(vsi, &vlan); + err = ice_vf_ena_vlan_promisc(vf, vsi, &vlan); if (err) return err; } @@ -9233,7 +6619,7 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi, * we are in Single VLAN Mode (SVM) */ if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) { - err = ice_vf_ena_vlan_promisc(vsi, &vlan); + err = ice_vf_ena_vlan_promisc(vf, vsi, &vlan); if (err) return err; } @@ -9258,7 +6644,8 @@ ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi, struct virtchnl_vlan_filtering_caps *vfc, struct virtchnl_vlan_filter_list_v2 *vfl) { - u16 num_requested_filters = vsi->num_vlan + vfl->num_elements; + u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) + + vfl->num_elements; if (num_requested_filters > vfc->max_filters) return false; @@ -9537,7 +6924,8 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA; out: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2, + v_ret, NULL, 0); } /** @@ -9611,7 +6999,8 @@ static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA; out: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2, + v_ret, NULL, 0); } /** @@ -9669,7 +7058,8 @@ static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg) } out: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2, v_ret, NULL, 0); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2, + v_ret, NULL, 0); } /** @@ -9723,10 +7113,11 @@ static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg) } out: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2, v_ret, NULL, 0); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2, + v_ret, NULL, 0); } -static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = { +static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = { .get_ver_msg = ice_vc_get_ver_msg, .get_vf_res_msg = ice_vc_get_vf_res_msg, .reset_vf = ice_vc_reset_vf_msg, @@ -9763,9 +7154,16 @@ static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = { .dis_dcf_cap = ice_vc_dis_dcf_cap, .dcf_get_vsi_map = ice_vc_dcf_get_vsi_map, 
.dcf_query_pkg_info = ice_vc_dcf_query_pkg_info, + .dcf_config_vf_tc = ice_vc_dcf_config_tc, .handle_rss_cfg_msg = ice_vc_handle_rss_cfg, + .get_qos_caps = ice_vc_get_qos_caps, + .cfg_q_tc_map = ice_vc_cfg_q_tc_map, + .cfg_q_bw = ice_vc_cfg_q_bw, + .cfg_q_quanta = ice_vc_cfg_q_quanta, .add_fdir_fltr_msg = ice_vc_add_fdir_fltr, .del_fdir_fltr_msg = ice_vc_del_fdir_fltr, + .flow_sub_fltr_msg = ice_vc_flow_sub_fltr, + .flow_unsub_fltr_msg = ice_vc_flow_unsub_fltr, .get_max_rss_qregion = ice_vc_get_max_rss_qregion, .ena_qs_v2_msg = ice_vc_ena_qs_v2_msg, .dis_qs_v2_msg = ice_vc_dis_qs_v2_msg, @@ -9779,15 +7177,13 @@ static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = { .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg, }; -void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) +/** + * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops + * @vf: the VF to switch ops + */ +void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { - *ops = ice_vc_vf_dflt_ops; -} - -static int ice_vc_repr_no_action_msg(struct ice_vf __always_unused *vf, - u8 __always_unused *msg) -{ - return 0; + vf->virtchnl_ops = &ice_virtchnl_dflt_ops; } /** @@ -9824,18 +7220,23 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) for (i = 0; i < al->num_elements; i++) { u8 *mac_addr = al->list[i].addr; + int result; if (!is_unicast_ether_addr(mac_addr) || ether_addr_equal(mac_addr, vf->hw_lan_addr.addr)) continue; if (vf->pf_set_mac) { - dev_err(ice_pf_to_dev(pf), - "VF attempting to override administratively set MAC address\n"); + dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n"); v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; goto handle_mac_exit; } - + result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr); + if (result) { + dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d\n, error %d\n", + mac_addr, vf->vf_id, result); + goto handle_mac_exit; + } ice_vfhw_mac_add(vf, &al->list[i]); vf->num_mac++; @@ -9853,28 +7254,98 @@ handle_mac_exit: * @msg: virtchannel message * * Respond with success to not break normal VF flow. + * For legacy VF driver try to update cached MAC address. 
*/ -static int ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, - u8 __always_unused *msg) +static int +ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg) { + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; + + ice_update_legacy_cached_mac(vf, &al->list[0]); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, VIRTCHNL_STATUS_SUCCESS, NULL, 0); } -static int ice_vc_repr_no_action(struct ice_vf __always_unused *vf) +static int +ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg) { - return 0; + dev_dbg(ice_pf_to_dev(vf->pf), + "Can't config promiscuous mode in switchdev mode for VF %d\n", + vf->vf_id); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, + NULL, 0); } -void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) +static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = { + .get_ver_msg = ice_vc_get_ver_msg, + .get_vf_res_msg = ice_vc_get_vf_res_msg, + .reset_vf = ice_vc_reset_vf_msg, + .add_mac_addr_msg = ice_vc_repr_add_mac, + .del_mac_addr_msg = ice_vc_repr_del_mac, + .cfg_qs_msg = ice_vc_cfg_qs_msg, + .ena_qs_msg = ice_vc_ena_qs_msg, + .dis_qs_msg = ice_vc_dis_qs_msg, + .request_qs_msg = ice_vc_request_qs_msg, + .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, + .config_rss_key = ice_vc_config_rss_key, + .config_rss_lut = ice_vc_config_rss_lut, + .get_stats_msg = ice_vc_get_stats_msg, + .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode, + .add_vlan_msg = ice_vc_add_vlan_msg, + .remove_vlan_msg = ice_vc_remove_vlan_msg, + .query_rxdid = ice_vc_query_rxdid, + .get_rss_hena = ice_vc_get_rss_hena, + .set_rss_hena_msg = ice_vc_set_rss_hena, + .ena_vlan_stripping = ice_vc_ena_vlan_stripping, + .dis_vlan_stripping = ice_vc_dis_vlan_stripping, +#ifdef HAVE_TC_SETUP_CLSFLOWER + .add_qch_msg = ice_vc_add_qch_msg, + .add_switch_filter_msg = ice_vc_add_switch_filter, + .del_switch_filter_msg = ice_vc_del_switch_filter, + .del_qch_msg = ice_vc_del_qch_msg, +#endif /* HAVE_TC_SETUP_CLSFLOWER */ + .rdma_msg = ice_vc_rdma_msg, + .cfg_rdma_irq_map_msg = ice_vc_cfg_rdma_irq_map_msg, + .clear_rdma_irq_map = ice_vc_clear_rdma_irq_map, + .dcf_vlan_offload_msg = ice_vc_dcf_vlan_offload_msg, + .dcf_cmd_desc_msg = ice_vc_dcf_cmd_desc_msg, + .dcf_cmd_buff_msg = ice_vc_dcf_cmd_buff_msg, + .dis_dcf_cap = ice_vc_dis_dcf_cap, + .dcf_get_vsi_map = ice_vc_dcf_get_vsi_map, + .dcf_query_pkg_info = ice_vc_dcf_query_pkg_info, + .dcf_config_vf_tc = ice_vc_dcf_config_tc, + .handle_rss_cfg_msg = ice_vc_handle_rss_cfg, + .get_qos_caps = ice_vc_get_qos_caps, + .cfg_q_tc_map = ice_vc_cfg_q_tc_map, + .cfg_q_bw = ice_vc_cfg_q_bw, + .cfg_q_quanta = ice_vc_cfg_q_quanta, + .add_fdir_fltr_msg = ice_vc_add_fdir_fltr, + .del_fdir_fltr_msg = ice_vc_del_fdir_fltr, + .flow_sub_fltr_msg = ice_vc_flow_sub_fltr, + .flow_unsub_fltr_msg = ice_vc_flow_unsub_fltr, + .get_max_rss_qregion = ice_vc_get_max_rss_qregion, + .ena_qs_v2_msg = ice_vc_ena_qs_v2_msg, + .dis_qs_v2_msg = ice_vc_dis_qs_v2_msg, + .map_q_vector_msg = ice_vc_map_q_vector_msg, + .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps, + .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg, + .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg, + .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg, + .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg, + .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg, + .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg, +}; + +/** + * ice_virtchnl_set_repr_ops - 
Switch to representor virtchnl ops + * @vf: the VF to switch ops + */ +void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { - ops->add_mac_addr_msg = ice_vc_repr_add_mac; - ops->del_mac_addr_msg = ice_vc_repr_del_mac; - ops->add_vlan_msg = ice_vc_repr_no_action_msg; - ops->remove_vlan_msg = ice_vc_repr_no_action_msg; - ops->ena_vlan_stripping = ice_vc_repr_no_action; - ops->dis_vlan_stripping = ice_vc_repr_no_action; - ops->cfg_promiscuous_mode_msg = ice_vc_repr_no_action_msg; + vf->virtchnl_ops = &ice_virtchnl_repr_ops; } /** @@ -9889,27 +7360,31 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { u32 v_opcode = le32_to_cpu(event->desc.cookie_high); s16 vf_id = le16_to_cpu(event->desc.retval); + const struct ice_virtchnl_ops *ops; u16 msglen = event->msg_len; - struct ice_vc_vf_ops *ops; u8 *msg = event->msg_buf; struct ice_vf *vf = NULL; struct device *dev; int err = 0; dev = ice_pf_to_dev(pf); - if (ice_validate_vf_id(pf, vf_id)) { - err = -EINVAL; - goto error_handler; + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) { + dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n", + vf_id, v_opcode, msglen); + return; } - vf = &pf->vf[vf_id]; + mutex_lock(&vf->cfg_lock); + /* Check if VF is disabled. */ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) { err = -EPERM; goto error_handler; } - ops = &vf->vc_ops; + ops = vf->virtchnl_ops; /* Perform basic checks on the msg */ err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); @@ -9920,20 +7395,21 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) err = -EINVAL; } - if (!ice_vc_is_opcode_allowed(vf, v_opcode)) { - ice_vc_send_msg_to_vf(vf, v_opcode, - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, - 0); - return; - } - error_handler: if (err) { ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, NULL, 0); - dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", - vf_id, v_opcode, msglen, err); - return; + ice_dev_err_errno(dev, err, + "Invalid message from VF %d, opcode %d, len %d", + vf_id, v_opcode, msglen); + goto finish; + } + + if (!ice_vc_is_opcode_allowed(vf, v_opcode)) { + ice_vc_send_msg_to_vf(vf, v_opcode, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, + 0); + goto finish; } switch (v_opcode) { @@ -10037,9 +7513,6 @@ error_handler: case VIRTCHNL_OP_DCF_CMD_BUFF: err = ops->dcf_cmd_buff_msg(vf, msg, msglen); break; - case VIRTCHNL_OP_DCF_RULE_FLUSH: - err = ice_vc_flush_dcf_rule(vf); - break; case VIRTCHNL_OP_DCF_DISABLE: err = ops->dis_dcf_cap(vf); break; @@ -10049,18 +7522,39 @@ error_handler: case VIRTCHNL_OP_DCF_GET_PKG_INFO: err = ops->dcf_query_pkg_info(vf); break; + case VIRTCHNL_OP_DCF_CONFIG_BW: + err = ops->dcf_config_vf_tc(vf, msg); + break; case VIRTCHNL_OP_ADD_RSS_CFG: err = ops->handle_rss_cfg_msg(vf, msg, true); break; case VIRTCHNL_OP_DEL_RSS_CFG: err = ops->handle_rss_cfg_msg(vf, msg, false); break; + case VIRTCHNL_OP_GET_QOS_CAPS: + err = ops->get_qos_caps(vf); + break; + case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP: + err = ops->cfg_q_tc_map(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_QUEUE_BW: + err = ops->cfg_q_bw(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_QUANTA: + err = ops->cfg_q_quanta(vf, msg); + break; case VIRTCHNL_OP_ADD_FDIR_FILTER: err = ops->add_fdir_fltr_msg(vf, msg); break; case VIRTCHNL_OP_DEL_FDIR_FILTER: err = ops->del_fdir_fltr_msg(vf, msg); break; + case VIRTCHNL_OP_FLOW_SUBSCRIBE: + err = ops->flow_sub_fltr_msg(vf, msg); + break; + case VIRTCHNL_OP_FLOW_UNSUBSCRIBE: + err = 
ops->flow_unsub_fltr_msg(vf, msg); + break; case VIRTCHNL_OP_GET_MAX_RSS_QREGION: err = ops->get_max_rss_qregion(vf); break; @@ -10111,684 +7605,8 @@ error_handler: dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n", vf_id, v_opcode, err); } -} - -/** - * ice_get_vf_cfg - * @netdev: network interface device structure - * @vf_id: VF identifier - * @ivi: VF configuration structure - * - * return VF configuration - */ -int -ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) -{ - struct ice_pf *pf = ice_netdev_to_pf(netdev); - struct ice_vf *vf; - int ret; - - if (ice_validate_vf_id(pf, vf_id)) - return -EINVAL; - - vf = &pf->vf[vf_id]; - ret = ice_check_vf_ready_for_cfg(vf); - if (ret) - return ret; - - ivi->vf = vf_id; - ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr); - - /* VF configuration for VLAN and applicable QoS */ - ivi->vlan = ice_vf_get_port_vlan_id(vf); - ivi->qos = ice_vf_get_port_vlan_prio(vf); -#ifdef IFLA_VF_VLAN_INFO_MAX - if (ice_vf_is_port_vlan_ena(vf)) - ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf)); -#endif /* IFLA_VF_VLAN_INFO_MAX */ - -#ifdef HAVE_NDO_SET_VF_TRUST - ivi->trusted = vf->trusted; -#endif /* HAVE_NDO_SET_VF_TRUST */ - ivi->spoofchk = vf->spoofchk; -#ifdef HAVE_NDO_SET_VF_LINK_STATE - if (!vf->link_forced) - ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; - else if (vf->link_up) - ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; - else - ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; -#endif -#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE - ivi->max_tx_rate = vf->max_tx_rate; - ivi->min_tx_rate = vf->min_tx_rate; -#else - ivi->tx_rate = vf->max_tx_rate; -#endif - return 0; -} - -/** - * ice_set_vf_mac - * @netdev: network interface device structure - * @vf_id: VF identifier - * @mac: MAC address - * - * program VF MAC address - */ -int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) -{ - struct ice_pf *pf = ice_netdev_to_pf(netdev); - struct ice_vf *vf; - int ret; - - if (ice_validate_vf_id(pf, vf_id)) - return -EINVAL; - - if (is_multicast_ether_addr(mac)) { - netdev_err(netdev, "%pM not a valid unicast address\n", mac); - return -EINVAL; - } - - vf = &pf->vf[vf_id]; - /* nothing left to do, unicast MAC already set */ - if (ether_addr_equal(vf->dev_lan_addr.addr, mac) && - ether_addr_equal(vf->hw_lan_addr.addr, mac)) - return 0; - - ret = ice_check_vf_ready_for_cfg(vf); - if (ret) - return ret; - - if (ice_vf_chnl_dmac_fltr_cnt(vf)) { - netdev_err(netdev, - "can't set mac %pM. VF %d has tc-flower filters, delete them and try again\n", - mac, vf_id); - return -EAGAIN; - } - - /* VF is notified of its new MAC via the PF's response to the - * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset - */ - ether_addr_copy(vf->dev_lan_addr.addr, mac); - ether_addr_copy(vf->hw_lan_addr.addr, mac); - if (is_zero_ether_addr(mac)) { - /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */ - vf->pf_set_mac = false; - netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n", - vf->vf_id); - } else { - /* PF will add MAC rule for the VF */ - vf->pf_set_mac = true; - netdev_info(netdev, "Setting MAC %pM on VF %d. 
VF driver will be reinitialized\n", - mac, vf_id); - } - - ice_vc_reset_vf(vf); - return 0; -} - -/** - * ice_set_vf_trust - * @netdev: network interface device structure - * @vf_id: VF identifier - * @trusted: Boolean value to enable/disable trusted VF - * - * Enable or disable a given VF as trusted - */ -int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) -{ - struct ice_pf *pf = ice_netdev_to_pf(netdev); - struct ice_vf *vf; - int ret; - - if (ice_is_eswitch_mode_switchdev(pf)) { - dev_info(ice_pf_to_dev(pf), - "Trusted VF is forbidden in switchdev mode\n"); - return -EOPNOTSUPP; - } - - if (ice_validate_vf_id(pf, vf_id)) - return -EINVAL; - - vf = &pf->vf[vf_id]; - ret = ice_check_vf_ready_for_cfg(vf); - if (ret) - return ret; - - /* Check if already trusted */ - if (trusted == vf->trusted) - return 0; - -#ifdef HAVE_NDO_SET_VF_TRUST - /* If the trust mode of a given DCF is taken away without the DCF - * gracefully relinquishing the DCF functionality, remove ALL switch - * filters that were added by the DCF and treat this VF as any other - * untrusted AVF. - */ - if (ice_is_vf_dcf(vf) && !trusted && - ice_dcf_get_state(pf) != ICE_DCF_STATE_OFF) { - ice_rm_all_dcf_sw_rules(pf); - ice_clear_dcf_acl_cfg(pf); - ice_clear_dcf_udp_tunnel_cfg(pf); - pf->hw.dcf_caps &= ~(DCF_ACL_CAP | DCF_UDP_TUNNEL_CAP); - ice_dcf_set_state(pf, ICE_DCF_STATE_OFF); - pf->dcf.vf = NULL; - vf->driver_caps &= ~VIRTCHNL_VF_CAP_DCF; - } - - ice_vc_reset_vf(vf); -#endif /* HAVE_NDO_SET_VF_TRUST */ - vf->trusted = trusted; - dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", vf_id, - trusted ? "" : "un"); - - return 0; -} - -#ifdef HAVE_NDO_SET_VF_LINK_STATE -/** - * ice_set_vf_link_state - * @netdev: network interface device structure - * @vf_id: VF identifier - * @link_state: required link state - * - * Set VF's link state, irrespective of physical link state status - */ -int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) -{ - struct ice_pf *pf = ice_netdev_to_pf(netdev); - struct ice_vf *vf; - int ret; - - if (ice_validate_vf_id(pf, vf_id)) - return -EINVAL; - - vf = &pf->vf[vf_id]; - ret = ice_check_vf_ready_for_cfg(vf); - if (ret) - return ret; - - /* disallow link state change if eeprom is corrupted */ - if (test_bit(ICE_BAD_EEPROM, pf->state)) - return -EOPNOTSUPP; - - switch (link_state) { - case IFLA_VF_LINK_STATE_AUTO: - vf->link_forced = false; - break; - case IFLA_VF_LINK_STATE_ENABLE: - vf->link_forced = true; - vf->link_up = true; - break; - case IFLA_VF_LINK_STATE_DISABLE: - vf->link_forced = true; - vf->link_up = false; - break; - default: - return -EINVAL; - } - - if (vf->repr) { - struct net_device *pr_netdev = vf->repr->netdev; - unsigned int flags = pr_netdev->flags; - - flags = vf->link_up ? 
flags | IFF_UP : flags & ~IFF_UP; - dev_change_flags(pr_netdev, flags, NULL); - } - - ice_vc_notify_vf_link_state(vf); - - return 0; -} -#endif /* HAVE_NDO_SET_VF_LINK_STATE */ - -#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -/** - * ice_calc_all_vfs_min_tx_rate - calculate cummulative min Tx rate on all VFs - * @pf: PF associated with VFs - */ -static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf) -{ - int rate = 0, i; - - ice_for_each_vf(pf, i) - rate += pf->vf[i].min_tx_rate; - - return rate; -} - -/** - * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription - * @vf: VF trying to configure min_tx_rate - * @min_tx_rate: min Tx rate in Mbps - * - * Check if the min_tx_rate being passed in will cause oversubscription of total - * min_tx_rate based on the current link speed and all other VFs configured - * min_tx_rate - * - * Return true if the passed min_tx_rate would cause oversubscription, else - * return false - */ -static bool -ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate) -{ - int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf)); - int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); - - /* this VF's previous rate is being overwritten */ - all_vfs_min_tx_rate -= vf->min_tx_rate; - - if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) { - dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n", - min_tx_rate, vf->vf_id, - all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps, - link_speed_mbps); - return true; - } - - return false; -} -#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ - -#ifdef HAVE_TC_SETUP_CLSFLOWER -/** - * ice_vf_adq_total_max_tx_rate - cummulative max_tx_rate when VF ADQ is enabled - * @vf: Pointer to VF - * - * This function cummulative max Tx rate of all TCs if VF ADQ is enabled - */ -static u64 ice_vf_adq_total_max_tx_rate(struct ice_vf *vf) -{ - u64 cummulative_max_tx_rate = 0; - int i; - - if (!ice_is_vf_adq_ena(vf)) - return 0; - - for (i = 0; i < vf->num_tc; i++) - cummulative_max_tx_rate += vf->ch[i].max_tx_rate; - - return cummulative_max_tx_rate; -} -#endif /* HAVE_TC_SETUP_CLSFLOWER */ - -/** - * ice_set_vf_bw - set min/max VF bandwidth - * @netdev: network interface device structure - * @vf_id: VF identifier - * @min_tx_rate: Minimum Tx rate in Mbps - * @max_tx_rate: Maximum Tx rate in Mbps - */ -#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -int -ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, - int max_tx_rate) -#else -int ice_set_vf_bw(struct net_device *netdev, int vf_id, int max_tx_rate) -#endif -{ - struct ice_pf *pf = ice_netdev_to_pf(netdev); - struct ice_vsi *vsi; - struct device *dev; - struct ice_vf *vf; - int ret; - - dev = ice_pf_to_dev(pf); - if (ice_validate_vf_id(pf, vf_id)) - return -EINVAL; - - vf = &pf->vf[vf_id]; - ret = ice_check_vf_ready_for_cfg(vf); - if (ret) - return ret; - - vsi = ice_get_vf_vsi(vf); - -#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE - /* when max_tx_rate is zero that means no max Tx rate limiting, so only - * check if max_tx_rate is non-zero - */ - if (max_tx_rate && min_tx_rate > max_tx_rate) { - dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n", - min_tx_rate, max_tx_rate); - return -EINVAL; - } - -#ifdef NETIF_F_HW_TC - if (min_tx_rate && ice_is_adq_active(pf)) { - dev_err(dev, "ADQ on PF is currently enabled. 
VF min Tx rate limiting not allowed on this PF.\n"); - return -EOPNOTSUPP; - } -#endif /* NETIF_F_HW_TC */ - - if (min_tx_rate && ice_is_dcb_active(pf)) { - dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n"); - return -EOPNOTSUPP; - } - - if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) - return -EINVAL; - - if (vf->min_tx_rate != (unsigned int)min_tx_rate) { - ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000); - if (ret) { - dev_err(dev, "Unable to set min-tx-rate for VF %d\n", - vf->vf_id); - return ret; - } - - vf->min_tx_rate = min_tx_rate; - } - -#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ - if (vf->max_tx_rate != (unsigned int)max_tx_rate) { -#ifdef HAVE_TC_SETUP_CLSFLOWER - u64 adq_max_tx_rate; -#endif - ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000); - if (ret) { - dev_err(dev, "Unable to set max-tx-rate for VF %d\n", - vf->vf_id); - return ret; - } - - vf->max_tx_rate = max_tx_rate; -#ifdef HAVE_TC_SETUP_CLSFLOWER - adq_max_tx_rate = ice_vf_adq_total_max_tx_rate(vf); - if (vf->max_tx_rate < adq_max_tx_rate) - dev_warn(dev, "Host managed max_tx_rate %u Mpbs for VF %d is less VF ADQ cummulative max_tx_rate %llu Mpbs\n", - vf->vf_id, vf->max_tx_rate, adq_max_tx_rate); -#endif - } - - return 0; -} - -#ifdef HAVE_VF_STATS -/** - * ice_get_vf_stats - populate some stats for the VF - * @netdev: the netdev of the PF - * @vf_id: the host OS identifier (0-255) - * @vf_stats: pointer to the OS memory to be initialized - */ -int ice_get_vf_stats(struct net_device *netdev, int vf_id, - struct ifla_vf_stats *vf_stats) -{ - struct ice_pf *pf = ice_netdev_to_pf(netdev); - struct ice_eth_stats *stats; - struct ice_vsi *vsi; - struct ice_vf *vf; - int ret; - - if (ice_validate_vf_id(pf, vf_id)) - return -EINVAL; - - vf = &pf->vf[vf_id]; - ret = ice_check_vf_ready_for_cfg(vf); - if (ret) - return ret; - - vsi = ice_get_vf_vsi(vf); - if (!vsi) - return -EINVAL; - - ice_update_eth_stats(vsi); - stats = &vsi->eth_stats; - - memset(vf_stats, 0, sizeof(*vf_stats)); - - vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + - stats->rx_multicast; - vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + - stats->tx_multicast; - vf_stats->rx_bytes = stats->rx_bytes; - vf_stats->tx_bytes = stats->tx_bytes; - vf_stats->broadcast = stats->rx_broadcast; - vf_stats->multicast = stats->rx_multicast; -#ifdef HAVE_VF_STATS_DROPPED - vf_stats->rx_dropped = stats->rx_discards; - vf_stats->tx_dropped = stats->tx_discards; -#endif - - return 0; -} -#endif /* HAVE_VF_STATS */ - -/** - * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event - * @vf: pointer to the VF structure - */ -void ice_print_vf_rx_mdd_event(struct ice_vf *vf) -{ - struct ice_pf *pf = vf->pf; - struct device *dev; - - dev = ice_pf_to_dev(pf); - - dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", - vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, - vf->dev_lan_addr.addr, - test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) - ? "on" : "off"); -} - -/** - * ice_print_vfs_mdd_events - print VFs malicious driver detect event - * @pf: pointer to the PF structure - * - * Called from ice_handle_mdd_event to rate limit and print VFs MDD events. 
- */ -void ice_print_vfs_mdd_events(struct ice_pf *pf) -{ - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; - int i; - - /* check that there are pending MDD events to print */ - if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state)) - return; - - /* VF MDD event logs are rate limited to one second intervals */ - if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1)) - return; - - pf->last_printed_mdd_jiffies = jiffies; - - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; - - /* only print Rx MDD event message if there are new events */ - if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { - vf->mdd_rx_events.last_printed = - vf->mdd_rx_events.count; - ice_print_vf_rx_mdd_event(vf); - } - - /* only print Tx MDD event message if there are new events */ - if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { - vf->mdd_tx_events.last_printed = - vf->mdd_tx_events.count; - - dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", - vf->mdd_tx_events.count, hw->pf_id, i, - vf->dev_lan_addr.addr); - } - } -} - -/** - * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR - * @pdev: pointer to a pci_dev structure - * - * Called when recovering from a PF FLR to restore interrupt capability to - * the VFs. - */ -void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) -{ - u16 vf_id; - int pos; - - if (!pci_num_vf(pdev)) - return; - - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - if (pos) { - struct pci_dev *vfdev; - - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, - &vf_id); - vfdev = pci_get_device(pdev->vendor, vf_id, NULL); - while (vfdev) { - if (vfdev->is_virtfn && vfdev->physfn == pdev) - pci_restore_msi_state(vfdev); - vfdev = pci_get_device(pdev->vendor, vf_id, - vfdev); - } - } -} - -/** - * ice_is_malicious_vf - helper function to detect a malicious VF - * @pf: ptr to struct ice_pf - * @event: pointer to the AQ event - * @num_msg_proc: the number of messages processed so far - * @num_msg_pending: the number of messages peinding in admin queue - */ -bool -ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, - u16 num_msg_proc, u16 num_msg_pending) -{ - s16 vf_id = le16_to_cpu(event->desc.retval); - struct device *dev = ice_pf_to_dev(pf); - struct ice_mbx_data mbxdata; - enum ice_status status; - bool malvf = false; - struct ice_vf *vf; - - if (ice_validate_vf_id(pf, vf_id)) - return false; - - vf = &pf->vf[vf_id]; - /* Check if VF is disabled. */ - if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) - return false; - - mbxdata.num_msg_proc = num_msg_proc; - mbxdata.num_pending_arq = num_msg_pending; - mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries; -#define ICE_MBX_OVERFLOW_WATERMARK 64 - mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK; - - /* check to see if we have a malicious VF */ - status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf); - if (status) - return false; - - if (malvf) { - bool report_vf = false; - - /* if the VF is malicious and we haven't let the user - * know about it, then let them know now - */ - status = ice_mbx_report_malvf(&pf->hw, pf->malvfs, - ICE_MAX_VF_COUNT, vf_id, - &report_vf); - if (status) - dev_dbg(dev, "Error reporting malicious VF\n"); - - if (report_vf) { - struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); - - if (pf_vsi) - dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. 
Please see the Adapter User Guide for more information\n", - &vf->dev_lan_addr.addr[0], - pf_vsi->netdev->dev_addr); - } - - return true; - } - - /* if there was an error in detection or the VF is not malicious then - * return false - */ - return false; -} - -static void ice_dump_vf(struct ice_vf *vf) -{ - struct ice_vsi *vsi; - struct device *dev; - struct ice_pf *pf; - - if (!vf) - return; - - pf = vf->pf; - vsi = ice_get_vf_vsi(vf); - if (!vsi) - return; - - dev = ice_pf_to_dev(pf); - dev_info(dev, "VF[%d]:\n", vf->vf_id); - dev_info(dev, "\tvf_ver.major = %d vf_ver.minor = %d\n", - vf->vf_ver.major, vf->vf_ver.minor); - dev_info(dev, "\tdriver_caps = 0x%08x\n", vf->driver_caps); - dev_info(dev, "\tvf_caps = 0x%08lx\n", vf->vf_caps); - dev_info(dev, "\tvf_states:\n"); - if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) - dev_info(dev, "\t\tICE_VF_STATE_INIT\n"); - if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) - dev_info(dev, "\t\tICE_VF_STATE_ACTIVE\n"); - if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) - dev_info(dev, "\t\tICE_VF_STATE_QS_ENA\n"); - if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) - dev_info(dev, "\t\tICE_VF_STATE_MC_PROMISC\n"); - if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) - dev_info(dev, "\t\tICE_VF_STATE_UC_PROMISC\n"); - dev_info(dev, "\tvsi = %pK, vsi->idx = %d, vsi->vsi_num = %d\n", - vsi, vsi->idx, vsi->vsi_num); - dev_info(dev, "\tlan_vsi_idx = %d\n", vf->lan_vsi_idx); - dev_info(dev, "\tlan_vsi_num = %d\n", vf->lan_vsi_num); - dev_info(dev, "\tnum_mac = %d\n", vf->num_mac); - dev_info(dev, "\tdev_lan_addr = %pM\n", &vf->dev_lan_addr.addr[0]); - dev_info(dev, "\thw_lan_addr = %pM\n", &vf->hw_lan_addr.addr[0]); - dev_info(dev, "\tnum_req_qs = %d\n", vf->num_req_qs); - dev_info(dev, "\trxq_ena = 0x%lx\n", *vf->rxq_ena); - dev_info(dev, "\ttxq_ena = 0x%lx\n", *vf->txq_ena); - dev_info(dev, "\tPort VLAN status: %s\n", - ice_vf_is_port_vlan_ena(vf) ? "enabled" : "disabled"); - dev_info(dev, "\t\tPort VLAN ID = %d\n", ice_vf_get_port_vlan_id(vf)); - dev_info(dev, "\t\tQoS = %d\n", ice_vf_get_port_vlan_prio(vf)); - dev_info(dev, "\t\tTPID = 0x%x", ice_vf_get_port_vlan_tpid(vf)); - dev_info(dev, "\tpf_set_mac = %s\n", vf->pf_set_mac ? "true" : "false"); - dev_info(dev, "\ttrusted = %s\n", vf->trusted ? "true" : "false"); - dev_info(dev, "\tspoofchk = %s\n", vf->spoofchk ? "true" : "false"); -#ifdef HAVE_NDO_SET_VF_LINK_STATE - dev_info(dev, "\tlink_forced = %s, link_up (only valid when link_forced is true) = %s\n", - vf->link_forced ? "true" : "false", - vf->link_up ? "true" : "false"); -#endif - dev_info(dev, "\tmax_tx_rate = %d\n", vf->max_tx_rate); - dev_info(dev, "\tmin_tx_rate = %d\n", vf->min_tx_rate); - dev_info(dev, "\tnum_inval_msgs = %lld\n", vf->num_inval_msgs); - dev_info(dev, "\tnum_valid_msgs = %lld\n", vf->num_valid_msgs); - dev_info(dev, "\tmdd_rx_events = %u\n", vf->mdd_rx_events.count); - dev_info(dev, "\tmdd_tx_events = %u\n", vf->mdd_tx_events.count); - dev_info(dev, "\tfirst_vector_idx = %d\n", vf->first_vector_idx); - dev_info(dev, "\tvf_sw_id = %pK\n", vf->vf_sw_id); - dev_info(dev, "\tadq_enabled = %s\n", - vf->adq_enabled ? "true" : "false"); - dev_info(dev, "\tadq_fltr_ena = %s\n", - vf->adq_fltr_ena ? 
"true" : "false"); - dev_info(dev, "\tnum_tc = %u\n", vf->num_tc); - dev_info(dev, "\tnum_dmac_chnl_fltrs = %u\n", vf->num_dmac_chnl_fltrs); -} - -void ice_dump_all_vfs(struct ice_pf *pf) -{ - u16 v; - - ice_for_each_vf(pf, v) - ice_dump_vf(&pf->vf[v]); + +finish: + mutex_unlock(&vf->cfg_lock); + ice_put_vf(vf); } diff --git a/drivers/thirdparty/ice/ice_virtchnl.h b/drivers/thirdparty/ice/ice_virtchnl.h new file mode 100644 index 000000000000..e71dd4fc23cd --- /dev/null +++ b/drivers/thirdparty/ice/ice_virtchnl.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_VIRTCHNL_H_ +#define _ICE_VIRTCHNL_H_ + +#include +#include +#include +#include "virtchnl.h" +#include "ice_vf_lib.h" + +/* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */ +#define ICE_MAX_VLAN_PER_VF 8 + +/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for + * broadcast, and 16 for additional unicast/multicast filters + */ +#define ICE_MAX_MACADDR_PER_VF 18 + +/* Maximum number of queue pairs to configure by default for a VF */ +#define ICE_MAX_DFLT_QS_PER_VF 16 + +#define ICE_MAX_RSS_QS_PER_LARGE_VF 64 +#define ICE_MAX_RSS_QS_PER_VF 16 + +/* Max number of flexible descriptor rxdid */ +#define ICE_FLEX_DESC_RXDID_MAX_NUM 64 + +#define ICE_DFLT_QUANTA 1024 +#define ICE_MAX_QUANTA_SIZE 4096 +#define ICE_MIN_QUANTA_SIZE 256 +#define calc_quanta_desc(x) \ + max_t(u16, 12, min_t(u16, 63, ((x + 66) / 132) * 2 + 4)) + +#define ICE_VF_UCAST_PROMISC_BITS ICE_PROMISC_UCAST_RX +#define ICE_VF_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_RX | \ + ICE_PROMISC_VLAN_RX) + +struct ice_virtchnl_ops { + int (*get_ver_msg)(struct ice_vf *vf, u8 *msg); + int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg); + void (*reset_vf)(struct ice_vf *vf); + int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg); + int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg); + int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*request_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg); + int (*config_rss_key)(struct ice_vf *vf, u8 *msg); + int (*config_rss_lut)(struct ice_vf *vf, u8 *msg); + int (*get_stats_msg)(struct ice_vf *vf, u8 *msg); + int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg); + int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg); + int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg); + int (*query_rxdid)(struct ice_vf *vf); + int (*get_rss_hena)(struct ice_vf *vf); + int (*set_rss_hena_msg)(struct ice_vf *vf, u8 *msg); + int (*ena_vlan_stripping)(struct ice_vf *vf); + int (*dis_vlan_stripping)(struct ice_vf *vf); +#ifdef HAVE_TC_SETUP_CLSFLOWER + int (*add_qch_msg)(struct ice_vf *vf, u8 *msg); + int (*add_switch_filter_msg)(struct ice_vf *vf, u8 *msg); + int (*del_switch_filter_msg)(struct ice_vf *vf, u8 *msg); + int (*del_qch_msg)(struct ice_vf *vf, u8 *msg); +#endif /* HAVE_TC_SETUP_CLSFLOWER */ + int (*rdma_msg)(struct ice_vf *vf, u8 *msg, u16 msglen); + int (*cfg_rdma_irq_map_msg)(struct ice_vf *vf, u8 *msg); + int (*clear_rdma_irq_map)(struct ice_vf *vf); + int (*dcf_vlan_offload_msg)(struct ice_vf *vf, u8 *msg); + int (*dcf_cmd_desc_msg)(struct ice_vf *vf, u8 *msg, u16 msglen); + int (*dcf_cmd_buff_msg)(struct ice_vf *vf, u8 *msg, u16 msglen); + int (*dis_dcf_cap)(struct ice_vf *vf); + int (*dcf_get_vsi_map)(struct ice_vf *vf); + int (*dcf_query_pkg_info)(struct ice_vf *vf); + int 
(*dcf_config_vf_tc)(struct ice_vf *vf, u8 *msg); + int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add); + int (*get_qos_caps)(struct ice_vf *vf); + int (*cfg_q_tc_map)(struct ice_vf *vf, u8 *msg); + int (*cfg_q_bw)(struct ice_vf *vf, u8 *msg); + int (*cfg_q_quanta)(struct ice_vf *vf, u8 *msg); + int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg); + int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg); + int (*flow_sub_fltr_msg)(struct ice_vf *vf, u8 *msg); + int (*flow_unsub_fltr_msg)(struct ice_vf *vf, u8 *msg); + int (*get_max_rss_qregion)(struct ice_vf *vf); + int (*ena_qs_v2_msg)(struct ice_vf *vf, u8 *msg); + int (*dis_qs_v2_msg)(struct ice_vf *vf, u8 *msg); + int (*map_q_vector_msg)(struct ice_vf *vf, u8 *msg); + int (*get_offload_vlan_v2_caps)(struct ice_vf *vf); + int (*add_vlan_v2_msg)(struct ice_vf *vf, u8 *msg); + int (*remove_vlan_v2_msg)(struct ice_vf *vf, u8 *msg); + int (*ena_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg); + int (*dis_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg); + int (*ena_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg); + int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg); +}; + +/** + * ice_vc_get_max_chnl_tc_allowed + * @vf: pointer to the VF info + * + * This function returns max channel TC allowed depends upon "driver_caps" + */ +static inline u32 ice_vc_get_max_chnl_tc_allowed(struct ice_vf *vf) +{ + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2) + return VIRTCHNL_MAX_ADQ_V2_CHANNELS; + else + return VIRTCHNL_MAX_ADQ_CHANNELS; +} + +#ifdef CONFIG_PCI_IOV +void ice_virtchnl_set_dflt_ops(struct ice_vf *vf); +void ice_virtchnl_set_repr_ops(struct ice_vf *vf); +void +ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen); +void ice_vc_notify_vf_link_state(struct ice_vf *vf); +void ice_vc_notify_link_state(struct ice_pf *pf); +void ice_vc_notify_reset(struct ice_pf *pf); +int +ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen); +bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id); +#else /* CONFIG_PCI_IOV */ +static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { } +static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { } +static inline void +ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) +{ +} + +static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { } +static inline void ice_vc_notify_link_state(struct ice_pf *pf) { } +static inline void ice_vc_notify_reset(struct ice_pf *pf) { } + +static inline int +ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) +{ + return -EOPNOTSUPP; +} + +static inline bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) +{ + return 0; +} +#endif /* !CONFIG_PCI_IOV */ + +#endif /* _ICE_VIRTCHNL_H_ */ diff --git a/drivers/thirdparty/ice/ice_virtchnl_allowlist.c b/drivers/thirdparty/ice/ice_virtchnl_allowlist.c index ffbbe0bfc3d0..0912a2bd4064 100644 --- a/drivers/thirdparty/ice/ice_virtchnl_allowlist.c +++ b/drivers/thirdparty/ice/ice_virtchnl_allowlist.c @@ -93,8 +93,7 @@ static const u32 cap_dcf_allowlist_opcodes[] = { VIRTCHNL_OP_DCF_VLAN_OFFLOAD, VIRTCHNL_OP_DCF_CMD_DESC, VIRTCHNL_OP_DCF_CMD_BUFF, VIRTCHNL_OP_DCF_DISABLE, VIRTCHNL_OP_DCF_GET_VSI_MAP, - VIRTCHNL_OP_DCF_GET_PKG_INFO, - VIRTCHNL_OP_DCF_RULE_FLUSH, + VIRTCHNL_OP_DCF_GET_PKG_INFO, VIRTCHNL_OP_DCF_CONFIG_BW, 
}; /* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */ @@ -112,6 +111,16 @@ static const u32 fdir_pf_allowlist_opcodes[] = { VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER, }; +/* VIRTCHNL_VF_OFFLOAD_FSUB_PF */ +static const u32 fsub_pf_allowlist_opcodes[] = { + VIRTCHNL_OP_FLOW_SUBSCRIBE, VIRTCHNL_OP_FLOW_UNSUBSCRIBE, +}; + +/* VIRTCHNL_VF_OFFLOAD_QoS */ +static const u32 tc_allowlist_opcodes[] = { + VIRTCHNL_OP_GET_QOS_CAPS, VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP, + VIRTCHNL_OP_CONFIG_QUEUE_BW, VIRTCHNL_OP_CONFIG_QUANTA, +}; static const u32 large_num_qpairs_allowlist_opcodes[] = { VIRTCHNL_OP_GET_MAX_RSS_QREGION, @@ -143,6 +152,8 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = { ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, rx_flex_desc_allowlist_opcodes), ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes), ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes), + ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FSUB_PF, fsub_pf_allowlist_opcodes), + ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_QOS, tc_allowlist_opcodes), ALLOW_ITEM(VIRTCHNL_VF_LARGE_NUM_QPAIRS, large_num_qpairs_allowlist_opcodes), ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes), }; diff --git a/drivers/thirdparty/ice/ice_virtchnl_allowlist.h b/drivers/thirdparty/ice/ice_virtchnl_allowlist.h index c33bc6ac3f54..1c54e81667f2 100644 --- a/drivers/thirdparty/ice/ice_virtchnl_allowlist.h +++ b/drivers/thirdparty/ice/ice_virtchnl_allowlist.h @@ -10,4 +10,4 @@ bool ice_vc_is_opcode_allowed(struct ice_vf *vf, u32 opcode); void ice_vc_set_default_allowlist(struct ice_vf *vf); void ice_vc_set_working_allowlist(struct ice_vf *vf); void ice_vc_set_caps_allowlist(struct ice_vf *vf); -#endif +#endif /* _ICE_VIRTCHNL_ALLOWLIST_H_ */ diff --git a/drivers/thirdparty/ice/ice_virtchnl_fdir.c b/drivers/thirdparty/ice/ice_virtchnl_fdir.c index a02ebef5772a..83dba559798e 100644 --- a/drivers/thirdparty/ice/ice_virtchnl_fdir.c +++ b/drivers/thirdparty/ice/ice_virtchnl_fdir.c @@ -4,6 +4,7 @@ #include "ice.h" #include "ice_base.h" #include "ice_lib.h" +#include "ice_vf_lib_private.h" #define to_fltr_conf_from_desc(p) \ container_of(p, struct virtchnl_fdir_fltr_conf, input) @@ -38,11 +39,39 @@ #define FDIR_INSET_FLAG_ECPRI_UDP BIT_ULL(FDIR_INSET_FLAG_ECPRI_S) #define FDIR_INSET_FLAG_ECPRI_MAC (0ULL << FDIR_INSET_FLAG_ECPRI_S) +/* These macros are used to set/check flow/tunnel type. 
+ * @param input: a struct of ice_fdir_fltr + * @param conf: a struct of virtchnl_fdir_fltr_conf + * @param f_type: NONF_IPV4_GTPU/NONF_IPV4_GTPU_IPV4, etc + * @param tun_type: GTPU/ECPRI, etc + */ +#define FDIR_SET_FTYPE(f_type) \ + ((input)->flow_type = ICE_FLTR_PTYPE_NONF_ ## f_type) +#define FDIR_CHK_FTYPE(f_type) \ + ((input)->flow_type == ICE_FLTR_PTYPE_NONF_ ## f_type) +#define FDIR_SET_TTYPE(tun_type) \ + ((conf)->ttype = ICE_FDIR_TUNNEL_TYPE_ ## tun_type) +#define FDIR_CHK_TTYPE(tun_type) \ + ((conf)->ttype == ICE_FDIR_TUNNEL_TYPE_ ## tun_type) +#define FDIR_REPLACE_FTYPE(old_ftype, new_ftype) \ +do { \ + if (FDIR_CHK_FTYPE(old_ftype)) \ + FDIR_SET_FTYPE(new_ftype); \ +} while (0) + enum ice_fdir_tunnel_type { ICE_FDIR_TUNNEL_TYPE_NONE = 0, ICE_FDIR_TUNNEL_TYPE_GTPU, ICE_FDIR_TUNNEL_TYPE_GTPU_EH, ICE_FDIR_TUNNEL_TYPE_ECPRI, + ICE_FDIR_TUNNEL_TYPE_GTPU_INNER, + ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER, + ICE_FDIR_TUNNEL_TYPE_GRE, + ICE_FDIR_TUNNEL_TYPE_GTPOGRE, + ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER, + ICE_FDIR_TUNNEL_TYPE_GRE_INNER, + ICE_FDIR_TUNNEL_TYPE_L2TPV2, + ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER, }; struct virtchnl_fdir_fltr_conf { @@ -50,6 +79,11 @@ struct virtchnl_fdir_fltr_conf { enum ice_fdir_tunnel_type ttype; u64 inset_flag; u32 flow_id; + + struct ice_parser_profile *prof; + bool parser_ena; + u8 *pkt_buf; + u8 pkt_len; }; struct virtchnl_fdir_inset_map { @@ -60,6 +94,10 @@ struct virtchnl_fdir_inset_map { }; static const struct virtchnl_fdir_inset_map fdir_inset_map[] = { + {VIRTCHNL_PROTO_HDR_ETH_SRC, ICE_FLOW_FIELD_IDX_ETH_SA, + 0, 0}, + {VIRTCHNL_PROTO_HDR_ETH_DST, ICE_FLOW_FIELD_IDX_ETH_DA, + 0, 0}, {VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0}, {VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, @@ -98,6 +136,10 @@ static const struct virtchnl_fdir_inset_map fdir_inset_map[] = { 0, 0}, {VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0}, + {VIRTCHNL_PROTO_HDR_GTPU_UP_QFI, ICE_FLOW_FIELD_IDX_GTPU_UP_QFI, + 0, 0}, + {VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI, ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI, + 0, 0}, {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI, FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M}, {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI, @@ -120,6 +162,15 @@ static const struct virtchnl_fdir_inset_map fdir_inset_map[] = { FDIR_INSET_FLAG_ECPRI_UDP, FDIR_INSET_FLAG_ECPRI_M }, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID, ICE_FLOW_FIELD_IDX_IPV4_ID, + 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID, ICE_FLOW_FIELD_IDX_IPV6_ID, + 0, 0}, + {VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID, + 0, 0}, + {VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID, + ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID, + 0, 0}, }; /** @@ -319,7 +370,8 @@ ice_vc_fdir_parse_flow_fld(struct ice_vf *vf, * @vf: pointer to the VF structure * @fltr: virtual channel add cmd buffer * @conf: FDIR configuration for each filter - * @seg: array of one or more packet segments that describe the flow + * @segs: array of one or more packet segments that describe the flow + * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter * * Parse the virtual channel add msg buffer's field vector and store them into * flow's packet segment field @@ -330,12 +382,14 @@ static int ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, struct virtchnl_fdir_fltr_conf *conf, - struct ice_flow_seg_info *seg) + struct ice_flow_seg_info *segs, + int tun) { struct virtchnl_fdir_rule *rule = &fltr->rule_cfg; enum ice_flow_field 
fld[ICE_FLOW_FIELD_IDX_MAX]; struct device *dev = ice_pf_to_dev(vf->pf); struct virtchnl_proto_hdrs *proto; + struct ice_flow_seg_info *seg; int fld_cnt = 0; int i; @@ -354,6 +408,8 @@ ice_vc_fdir_set_flow_fld(struct ice_vf *vf, return -EINVAL; } + seg = (tun) ? &segs[tun] : segs; + for (i = 0; i < fld_cnt; i++) { ice_flow_set_fld(seg, fld[i], ICE_FLOW_FLD_OFF_INVAL, @@ -368,18 +424,123 @@ ice_vc_fdir_set_flow_fld(struct ice_vf *vf, * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header * @vf: pointer to the VF structure * @conf: FDIR configuration for each filter - * @seg: array of one or more packet segments that describe the flow + * @segs: array of one or more packet segments that describe the flow + * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter * * Return: 0 on success, and other on error. */ static int ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf, - struct ice_flow_seg_info *seg) + struct ice_flow_seg_info *segs, + int tun) { enum ice_fltr_ptype flow = conf->input.flow_type; enum ice_fdir_tunnel_type ttype = conf->ttype; struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_flow_seg_info *seg = NULL; + + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER) { + seg = &segs[0]; + switch (flow) { + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_GRE); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6: + 
case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_GRE); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + default: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + } + } + + seg = (tun) ? 
&segs[tun] : segs; switch (flow) { case ICE_FLTR_PTYPE_NON_IP_L2: @@ -427,6 +588,10 @@ ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER); break; + case ICE_FLTR_PTYPE_FRAG_IPV4: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_FRAG); + break; case ICE_FLTR_PTYPE_NONF_IPV4_TCP: ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 | @@ -438,10 +603,6 @@ ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_GTPU: - case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: - case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: - case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP: - case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER: if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) { ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4 | @@ -452,8 +613,77 @@ ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, return -EINVAL; } break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_UDP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_TCP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH: - case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_OTHER: if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) { ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_GTPU_IP | @@ -465,6 +695,82 @@ ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, return -EINVAL; } break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { 
+ dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_UDP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_TCP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_UDP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_TCP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW: if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) { ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | @@ -477,6 +783,82 @@ ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, return -EINVAL; } break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_UDP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_TCP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + 
ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_UDP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_TCP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP: if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) { ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | @@ -489,6 +871,82 @@ ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, return -EINVAL; } break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_UDP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_TCP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_UDP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_TCP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; case ICE_FLTR_PTYPE_NONF_IPV6_GTPU: case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER: if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) 
{ @@ -538,6 +996,230 @@ ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, return -EINVAL; } break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV4_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV4_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_UDP: + case 
ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_IPV6_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_IPV6_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case 
ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_UP | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 | @@ -577,6 +1259,10 @@ ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER); break; + case ICE_FLTR_PTYPE_FRAG_IPV6: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_FRAG); + break; case ICE_FLTR_PTYPE_NONF_IPV6_TCP: ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 | @@ -592,6 +1278,216 @@ ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER); break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GRE | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_UDP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GRE | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV4_TCP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GRE | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GRE | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_UDP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GRE | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel 
type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GRE_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_GRE_IPV6_TCP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GRE | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2: + if (ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV2 | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_ETH); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV2 | + ICE_FLOW_SEG_HDR_PPP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_ETH); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2: + if (ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV2 | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_ETH); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV2 | + ICE_FLOW_SEG_HDR_PPP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_ETH); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4: + if (ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV2 | + ICE_FLOW_SEG_HDR_PPP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV2 | + ICE_FLOW_SEG_HDR_PPP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV2 | + ICE_FLOW_SEG_HDR_PPP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6: + if (ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV2 | + ICE_FLOW_SEG_HDR_PPP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + 
flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV2 | + ICE_FLOW_SEG_HDR_PPP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP: + if (ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV2 | + ICE_FLOW_SEG_HDR_PPP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; default: dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n", flow, vf->vf_id); @@ -674,6 +1570,20 @@ void ice_vc_fdir_rem_prof_all(struct ice_vf *vf) } } +/** + * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR + * @fdir: pointer to the VF FDIR structure + */ +static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir) +{ + enum ice_fltr_ptype flow = ICE_FLTR_PTYPE_NONF_NONE; + + for (; flow < ICE_FLTR_PTYPE_MAX; flow++) { + fdir->fdir_fltr_cnt[flow][0] = 0; + fdir->fdir_fltr_cnt[flow][1] = 0; + } +} + /** * ice_vc_fdir_write_flow_prof * @vf: pointer to the VF structure @@ -696,7 +1606,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, struct ice_flow_seg_info *old_seg; struct ice_flow_prof *prof = NULL; struct ice_fd_hw_prof *vf_prof; - enum ice_status status; struct device *dev; struct ice_pf *pf; struct ice_hw *hw; @@ -719,7 +1628,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, vf_prof = fdir->fdir_prof[flow]; old_seg = vf_prof->fdir_seg[tun]; if (old_seg) { - if (!memcmp(old_seg, seg, sizeof(*seg) * ICE_FD_HW_SEG_MAX)) { + if (!memcmp(old_seg, seg, sizeof(*seg) * (tun + 1))) { dev_dbg(dev, "Duplicated profile for VF %d!\n", vf->vf_id); return -EEXIST; @@ -739,29 +1648,26 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow, tun ? 
ICE_FLTR_PTYPE_MAX : 0); - status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - tun + 1, NULL, 0, &prof); - ret = ice_status_to_errno(status); + ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, + tun + 1, NULL, 0, &prof); if (ret) { dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n", flow, vf->vf_id); goto err_exit; } - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, - vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, NULL, 0, &entry1_h); - ret = ice_status_to_errno(status); + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, NULL, 0, &entry1_h); if (ret) { dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n", flow, vf->vf_id); goto err_prof; } - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, - ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, NULL, 0, &entry2_h); - ret = ice_status_to_errno(status); + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, NULL, 0, &entry2_h); if (ret) { dev_dbg(dev, "Could not add flow 0x%x Ctrl VSI entry for VF %d\n", @@ -852,6 +1758,374 @@ ice_vc_fdir_has_prof_conflict(struct ice_vf *vf, flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) return true; break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4: + if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_GTPU || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: + if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_GTPU || + flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_TCP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_UDP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_UDP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_TCP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_TCP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_TCP) + return true; + break; + case 
ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_UDP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4_TCP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV4) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_TCP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_UDP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4_TCP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV4) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6: + if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_GTPU || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_TCP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6_TCP: + if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_GTPU || + flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV6) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_TCP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_UDP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_UDP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_TCP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_TCP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_TCP || + 
flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_TCP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_UDP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6_TCP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW_IPV6) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_TCP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_UDP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6_TCP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV6_TCP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP_IPV6) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP: + if (flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2 || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP || + flow_type_b == + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP: + if 
(flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2 ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4 ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP)
+			return true;
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4:
+		if (flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2 ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP)
+			return true;
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP:
+	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP:
+		if (flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2 ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4)
+			return true;
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6:
+		if (flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2 ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP)
+			return true;
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP:
+	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP:
+		if (flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2 ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP ||
+		    flow_type_b ==
+		    ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6)
+			return true;
+		break;
 	default:
 		break;
 	}
@@ -896,17 +2170,17 @@ ice_vc_fdir_config_input_set(struct ice_vf *vf,
 		return ret;
 	}
-	seg = kcalloc(ICE_FD_HW_SEG_MAX, sizeof(*seg), GFP_KERNEL);
+	seg = kcalloc((tun + 1), sizeof(*seg), GFP_KERNEL);
 	if (!seg)
 		return -ENOMEM;
-	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, &seg[tun]);
+	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg, tun);
 	if (ret) {
 		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
 		goto err_exit;
 	}
-	ret = ice_vc_fdir_set_flow_hdr(vf, conf, &seg[tun]);
+	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg, tun);
 	if (ret) {
 		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
 		goto err_exit;
@@ -928,6 +2202,95 @@ err_exit:
 	return ret;
 }
+/**
+ * ice_vc_fdir_is_raw_flow
+ * @proto: virtchnl protocol headers
+ *
+ * Check if the FDIR rule is a raw flow (protocol agnostic flow) or not.
+ * Note that a common FDIR rule must have a non-zero proto->count.
+ * Thus, we choose the tunnel_level and count of proto as the indicators.
+ * If both tunnel_level and count of proto are zero, this FDIR rule will
+ * be regarded as a raw flow.
+ *
+ * Returns whether the headers describe a raw flow or not.
+ */
+static bool
+ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto)
+{
+	return (proto->tunnel_level == 0 && proto->count == 0);
+}
+
+/**
+ * ice_vc_fdir_parse_raw
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ * @conf: FDIR configuration for each filter
+ *
+ * Parse the virtual channel filter's raw flow and store it into @conf
+ *
+ * Return: 0 on success, and other on error.
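+ *
+ * Flow: the raw packet spec and mask from @proto are copied into local
+ * buffers, run through the parser library (ice_parser_create() and
+ * ice_parser_run()) to build a parser profile via ice_parser_profile_init(),
+ * and the packet buffer and length are then kept in @conf for later rule
+ * programming.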
+ */ +static int +ice_vc_fdir_parse_raw(struct ice_vf *vf, + struct virtchnl_proto_hdrs *proto, + struct virtchnl_fdir_fltr_conf *conf) +{ + int status = -ENOMEM; + struct ice_parser_result rslt; + struct ice_pf *pf = vf->pf; + struct ice_parser *psr; + u8 *pkt_buf, *msk_buf; + struct ice_hw *hw; + u16 udp_port = 0; + + pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL); + msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL); + if (!pkt_buf || !msk_buf) + goto err_pkt_msk_buf_alloc; + + memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len); + memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len); + + hw = &pf->hw; + /* Get raw profile info via Parser Lib */ + if (ice_parser_create(hw, &psr)) + goto err_parser_process; + if (ice_get_open_tunnel_port(hw, TNL_VXLAN, &udp_port)) + ice_parser_vxlan_tunnel_set(psr, udp_port, true); + if (ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt)) + goto err_parser_process; + ice_parser_destroy(psr); + + conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL); + if (!conf->prof) + goto err_conf_prof_alloc; + + status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf, + proto->raw.pkt_len, ICE_BLK_FD, true, + conf->prof); + if (status) + goto err_parser_profile_init; + + /* Store raw flow info into @conf */ + conf->pkt_len = proto->raw.pkt_len; + conf->pkt_buf = pkt_buf; + kfree(msk_buf); + + conf->parser_ena = true; + + return 0; + +err_parser_profile_init: + kfree(conf->prof); +err_conf_prof_alloc: +err_parser_process: + ice_parser_destroy(psr); +err_pkt_msk_buf_alloc: + kfree(msk_buf); + kfree(pkt_buf); + return status; +} + /** * ice_vc_fdir_parse_pattern * @vf: pointer to the VF info @@ -956,8 +2319,13 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, return -EINVAL; } + /* For Protocol Agnostic Flow Offloading case only */ + if (ice_vc_fdir_is_raw_flow(proto)) + return ice_vc_fdir_parse_raw(vf, proto, conf); + for (i = 0; i < proto->count; i++) { struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i]; + struct frag_hdr *ip6h_ef; struct ip_esp_hdr *esph; struct ip_auth_hdr *ah; struct sctphdr *sctph; @@ -966,22 +2334,76 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct tcphdr *tcph; struct ethhdr *eth; struct iphdr *iph; + u16 frag_offset; u8 msg_type; u8 s_field; u8 *rawh; + u16 flags_version; + u16 pos; switch (hdr->type) { case VIRTCHNL_PROTO_HDR_ETH: eth = (struct ethhdr *)hdr->buffer; input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2; - if (hdr->field_selector) + if (hdr->field_selector) { + ether_addr_copy(input->ext_data_outer.dst_mac, + eth->h_dest); + ether_addr_copy(input->ext_data_outer.src_mac, + eth->h_source); input->ext_data.ether_type = eth->h_proto; + } break; case VIRTCHNL_PROTO_HDR_IPV4: iph = (struct iphdr *)hdr->buffer; l3 = VIRTCHNL_PROTO_HDR_IPV4; - input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; + + if (FDIR_CHK_FTYPE(IPV4_GTPU)) + FDIR_SET_FTYPE(IPV4_GTPU_IPV4); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_IPV4); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_DW)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_DW_IPV4); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_UP)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_UP_IPV4); + else if (FDIR_CHK_FTYPE(IPV4_GRE)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4); + else if (FDIR_CHK_FTYPE(IPV6_GRE)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_GTPU)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_IPV4); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4_GTPU)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_IPV4); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_GTPU_EH)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_EH_IPV4); + else if 
(FDIR_CHK_FTYPE(IPV6_GRE_IPV4_GTPU_EH)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_EH_IPV4); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_GTPU_EH_DW)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_EH_DW_IPV4); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4_GTPU_EH_DW)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_EH_DW_IPV4); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_GTPU_EH_UP)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_EH_UP_IPV4); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4_GTPU_EH_UP)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_EH_UP_IPV4); + else if (FDIR_CHK_FTYPE(IPV4_L2TPV2_PPP)) + FDIR_SET_FTYPE(IPV4_L2TPV2_PPP_IPV4); + else if (FDIR_CHK_FTYPE(IPV6_L2TPV2_PPP)) + FDIR_SET_FTYPE(IPV6_L2TPV2_PPP_IPV4); + else + FDIR_SET_FTYPE(IPV4_OTHER); + + if (FDIR_CHK_TTYPE(GTPU)) + FDIR_SET_TTYPE(GTPU_INNER); + else if (FDIR_CHK_TTYPE(GTPU_EH)) + FDIR_SET_TTYPE(GTPU_EH_INNER); + else if (FDIR_CHK_TTYPE(GRE)) + FDIR_SET_TTYPE(GRE_INNER); + else if (FDIR_CHK_TTYPE(L2TPV2)) + FDIR_SET_TTYPE(L2TPV2_INNER); + + if (FDIR_CHK_TTYPE(GTPOGRE)) + FDIR_SET_TTYPE(GTPOGRE_INNER); if (hdr->field_selector) { input->ip.v4.src_ip = iph->saddr; @@ -991,10 +2413,82 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, input->ip.v4.proto = iph->protocol; } break; + case VIRTCHNL_PROTO_HDR_IPV4_FRAG: + iph = (struct iphdr *)hdr->buffer; + l3 = VIRTCHNL_PROTO_HDR_IPV4; + frag_offset = be16_to_cpu(iph->frag_off); + + if (frag_offset >> ICE_FDIR_IPV4_PKT_FLAG_MF_SHIFT & + ICE_FDIR_IPV4_PKT_FLAG_MF) { + input->flow_type = ICE_FLTR_PTYPE_FRAG_IPV4; + } else { + dev_err(dev, "Invalid fragment fdir for VF %d\n", + vf->vf_id); + return -EINVAL; + } + if (hdr->field_selector) { + input->ip.v4.src_ip = iph->saddr; + input->ip.v4.dst_ip = iph->daddr; + input->ip.v4.ttl = iph->ttl; + input->ip.v4.tos = iph->tos; + input->ip.v4.proto = iph->protocol; + input->ip.v4.packet_id = iph->id; + } + break; case VIRTCHNL_PROTO_HDR_IPV6: ip6h = (struct ipv6hdr *)hdr->buffer; l3 = VIRTCHNL_PROTO_HDR_IPV6; - input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER; + + if (FDIR_CHK_FTYPE(IPV4_GTPU)) + FDIR_SET_FTYPE(IPV4_GTPU_IPV6); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_IPV6); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_DW)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_DW_IPV6); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_UP)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_UP_IPV6); + else if (FDIR_CHK_FTYPE(IPV4_GRE)) + FDIR_SET_FTYPE(IPV4_GRE_IPV6); + else if (FDIR_CHK_FTYPE(IPV6_GRE)) + FDIR_SET_FTYPE(IPV6_GRE_IPV6); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_GTPU)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_IPV6); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4_GTPU)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_IPV6); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_GTPU_EH)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_EH_IPV6); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4_GTPU_EH)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_EH_IPV6); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_GTPU_EH_DW)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_EH_DW_IPV6); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4_GTPU_EH_DW)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_EH_DW_IPV6); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_GTPU_EH_UP)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_EH_UP_IPV6); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4_GTPU_EH_UP)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_EH_UP_IPV6); + else if (FDIR_CHK_FTYPE(IPV4_GRE)) + FDIR_SET_FTYPE(IPV4_GRE_IPV6); + else if (FDIR_CHK_FTYPE(IPV6_GRE)) + FDIR_SET_FTYPE(IPV6_GRE_IPV6); + else if (FDIR_CHK_FTYPE(IPV4_L2TPV2_PPP)) + FDIR_SET_FTYPE(IPV4_L2TPV2_PPP_IPV6); + else if (FDIR_CHK_FTYPE(IPV6_L2TPV2_PPP)) + FDIR_SET_FTYPE(IPV6_L2TPV2_PPP_IPV6); + else + 
FDIR_SET_FTYPE(IPV6_OTHER); + + if (FDIR_CHK_TTYPE(GTPU)) + FDIR_SET_TTYPE(GTPU_INNER); + else if (FDIR_CHK_TTYPE(GTPU_EH)) + FDIR_SET_TTYPE(GTPU_EH_INNER); + else if (FDIR_CHK_TTYPE(GRE)) + FDIR_SET_TTYPE(GRE_INNER); + else if (FDIR_CHK_TTYPE(L2TPV2)) + FDIR_SET_TTYPE(L2TPV2_INNER); + + if (FDIR_CHK_TTYPE(GTPOGRE)) + FDIR_SET_TTYPE(GTPOGRE_INNER); if (hdr->field_selector) { memcpy(input->ip.v6.src_ip, @@ -1008,14 +2502,100 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, (ip6h->flow_lbl[0] >> 4); input->ip.v6.proto = ip6h->nexthdr; } + break; + case VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG: + ip6h_ef = (struct frag_hdr *)hdr->buffer; + frag_offset = be16_to_cpu(ip6h_ef->frag_off); + if (frag_offset << ICE_FDIR_IPV6_PKT_FLAG_MF_SHIFT & + ICE_FDIR_IPV6_PKT_FLAG_MF) { + input->flow_type = ICE_FLTR_PTYPE_FRAG_IPV6; + } else { + dev_err(dev, "Invalid fragment fdir for VF %d\n", + vf->vf_id); + return -EINVAL; + } + + if (hdr->field_selector) + input->ip.v6.packet_id = + ip6h_ef->identification; + break; case VIRTCHNL_PROTO_HDR_TCP: tcph = (struct tcphdr *)hdr->buffer; - if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && !conf->ttype) input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP; - else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && !conf->ttype) input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP; + if (FDIR_CHK_FTYPE(IPV4_GTPU_IPV4)) + FDIR_SET_FTYPE(IPV4_GTPU_IPV4_TCP); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_IPV4)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_IPV4_TCP); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_DW_IPV4)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_DW_IPV4_TCP); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_UP_IPV4)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_UP_IPV4_TCP); + + if (FDIR_CHK_FTYPE(IPV4_GTPU_IPV6)) + FDIR_SET_FTYPE(IPV4_GTPU_IPV6_TCP); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_IPV6)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_IPV6_TCP); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_DW_IPV6)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_DW_IPV6_TCP); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_UP_IPV6)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_UP_IPV6_TCP); + + if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_TCP); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV6)) + FDIR_SET_FTYPE(IPV4_GRE_IPV6_TCP); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_TCP); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV6)) + FDIR_SET_FTYPE(IPV6_GRE_IPV6_TCP); + + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_IPV4, + IPV4_GRE_IPV4_GTPU_IPV4_TCP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_IPV4, + IPV6_GRE_IPV4_GTPU_IPV4_TCP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_IPV6, + IPV4_GRE_IPV4_GTPU_IPV6_TCP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_IPV6, + IPV6_GRE_IPV4_GTPU_IPV6_TCP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_EH_IPV4, + IPV4_GRE_IPV4_GTPU_EH_IPV4_TCP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_EH_IPV4, + IPV6_GRE_IPV4_GTPU_EH_IPV4_TCP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_EH_IPV6, + IPV4_GRE_IPV4_GTPU_EH_IPV6_TCP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_EH_IPV6, + IPV6_GRE_IPV4_GTPU_EH_IPV6_TCP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_EH_DW_IPV4, + IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_TCP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_EH_DW_IPV4, + IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_TCP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_EH_DW_IPV6, + IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_TCP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_EH_DW_IPV6, + IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_TCP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_EH_UP_IPV4, + IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_TCP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_EH_UP_IPV4, + 
IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_TCP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_EH_UP_IPV6, + IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_TCP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_EH_UP_IPV6, + IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_TCP); + + if (FDIR_CHK_FTYPE(IPV4_L2TPV2_PPP_IPV4)) + FDIR_SET_FTYPE(IPV4_L2TPV2_PPP_IPV4_TCP); + else if (FDIR_CHK_FTYPE(IPV4_L2TPV2_PPP_IPV6)) + FDIR_SET_FTYPE(IPV4_L2TPV2_PPP_IPV6_TCP); + else if (FDIR_CHK_FTYPE(IPV6_L2TPV2_PPP_IPV4)) + FDIR_SET_FTYPE(IPV6_L2TPV2_PPP_IPV4_TCP); + else if (FDIR_CHK_FTYPE(IPV6_L2TPV2_PPP_IPV6)) + FDIR_SET_FTYPE(IPV6_L2TPV2_PPP_IPV6_TCP); + if (hdr->field_selector) { if (l3 == VIRTCHNL_PROTO_HDR_IPV4) { input->ip.v4.src_port = tcph->source; @@ -1029,11 +2609,80 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, case VIRTCHNL_PROTO_HDR_UDP: udph = (struct udphdr *)hdr->buffer; l4 = VIRTCHNL_PROTO_HDR_UDP; - if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && !conf->ttype) input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP; - else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && !conf->ttype) input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP; + if (FDIR_CHK_FTYPE(IPV4_GTPU_IPV4)) + FDIR_SET_FTYPE(IPV4_GTPU_IPV4_UDP); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_IPV4)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_IPV4_UDP); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_DW_IPV4)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_DW_IPV4_UDP); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_UP_IPV4)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_UP_IPV4_UDP); + + if (FDIR_CHK_FTYPE(IPV4_GTPU_IPV6)) + FDIR_SET_FTYPE(IPV4_GTPU_IPV6_UDP); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_IPV6)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_IPV6_UDP); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_DW_IPV6)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_DW_IPV6_UDP); + else if (FDIR_CHK_FTYPE(IPV4_GTPU_EH_UP_IPV6)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_UP_IPV6_UDP); + + if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_UDP); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV6)) + FDIR_SET_FTYPE(IPV4_GRE_IPV6_UDP); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_UDP); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV6)) + FDIR_SET_FTYPE(IPV6_GRE_IPV6_UDP); + + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_IPV4, + IPV4_GRE_IPV4_GTPU_IPV4_UDP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_IPV4, + IPV6_GRE_IPV4_GTPU_IPV4_UDP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_IPV6, + IPV4_GRE_IPV4_GTPU_IPV6_UDP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_IPV6, + IPV6_GRE_IPV4_GTPU_IPV6_UDP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_EH_IPV4, + IPV4_GRE_IPV4_GTPU_EH_IPV4_UDP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_EH_IPV4, + IPV6_GRE_IPV4_GTPU_EH_IPV4_UDP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_EH_IPV6, + IPV4_GRE_IPV4_GTPU_EH_IPV6_UDP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_EH_IPV6, + IPV6_GRE_IPV4_GTPU_EH_IPV6_UDP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_EH_DW_IPV4, + IPV4_GRE_IPV4_GTPU_EH_DW_IPV4_UDP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_EH_DW_IPV4, + IPV6_GRE_IPV4_GTPU_EH_DW_IPV4_UDP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_EH_DW_IPV6, + IPV4_GRE_IPV4_GTPU_EH_DW_IPV6_UDP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_EH_DW_IPV6, + IPV6_GRE_IPV4_GTPU_EH_DW_IPV6_UDP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_EH_UP_IPV4, + IPV4_GRE_IPV4_GTPU_EH_UP_IPV4_UDP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_EH_UP_IPV4, + IPV6_GRE_IPV4_GTPU_EH_UP_IPV4_UDP); + FDIR_REPLACE_FTYPE(IPV4_GRE_IPV4_GTPU_EH_UP_IPV6, + IPV4_GRE_IPV4_GTPU_EH_UP_IPV6_UDP); + FDIR_REPLACE_FTYPE(IPV6_GRE_IPV4_GTPU_EH_UP_IPV6, + IPV6_GRE_IPV4_GTPU_EH_UP_IPV6_UDP); + + if 
(FDIR_CHK_FTYPE(IPV4_L2TPV2_PPP_IPV4)) + FDIR_SET_FTYPE(IPV4_L2TPV2_PPP_IPV4_UDP); + else if (FDIR_CHK_FTYPE(IPV4_L2TPV2_PPP_IPV6)) + FDIR_SET_FTYPE(IPV4_L2TPV2_PPP_IPV6_UDP); + else if (FDIR_CHK_FTYPE(IPV6_L2TPV2_PPP_IPV4)) + FDIR_SET_FTYPE(IPV6_L2TPV2_PPP_IPV4_UDP); + else if (FDIR_CHK_FTYPE(IPV6_L2TPV2_PPP_IPV6)) + FDIR_SET_FTYPE(IPV6_L2TPV2_PPP_IPV6_UDP); + if (hdr->field_selector) { if (l3 == VIRTCHNL_PROTO_HDR_IPV4) { input->ip.v4.src_port = udph->source; @@ -1143,63 +2792,102 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, cpu_to_be16(PFCP_PORT_NR); } break; + case VIRTCHNL_PROTO_HDR_GRE: + if (FDIR_CHK_FTYPE(IPV4_OTHER)) + FDIR_SET_FTYPE(IPV4_GRE); + if (FDIR_CHK_FTYPE(IPV6_OTHER)) + FDIR_SET_FTYPE(IPV6_GRE); + FDIR_SET_TTYPE(GRE); + break; case VIRTCHNL_PROTO_HDR_GTPU_IP: rawh = (u8 *)hdr->buffer; - if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP) - input->flow_type = - ICE_FLTR_PTYPE_NONF_IPV4_GTPU; - else - input->flow_type = - ICE_FLTR_PTYPE_NONF_IPV6_GTPU; + if (FDIR_CHK_FTYPE(IPV4_UDP)) + FDIR_SET_FTYPE(IPV4_GTPU); + else if (FDIR_CHK_FTYPE(IPV6_UDP)) + FDIR_SET_FTYPE(IPV6_GTPU); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_UDP)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV6_UDP)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4_UDP)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV6_UDP)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU); + if (hdr->field_selector) input->gtpu_data.teid = *(__force __be32 *)(&rawh[GTPU_TEID_OFFSET]); conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU; + if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_GTPU)) + FDIR_SET_TTYPE(GTPOGRE); + if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4_GTPU)) + FDIR_SET_TTYPE(GTPOGRE); break; case VIRTCHNL_PROTO_HDR_GTPU_EH: rawh = (u8 *)hdr->buffer; - if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_GTPU) - input->flow_type = - ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH; - else - input->flow_type = - ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH; + if (FDIR_CHK_FTYPE(IPV4_GTPU)) + FDIR_SET_FTYPE(IPV4_GTPU_EH); + else if (FDIR_CHK_FTYPE(IPV6_GTPU)) + FDIR_SET_FTYPE(IPV6_GTPU_EH); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_GTPU)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_EH); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV6_GTPU)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_EH); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4_GTPU)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_EH); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV6_GTPU)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_EH); if (hdr->field_selector) input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK; - conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH; + if (conf->ttype != ICE_FDIR_TUNNEL_TYPE_GTPOGRE) + FDIR_SET_TTYPE(GTPU_EH); break; case VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN: rawh = (u8 *)hdr->buffer; - if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_GTPU) - input->flow_type = - ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_DW; - else - input->flow_type = - ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH_DW; + if (FDIR_CHK_FTYPE(IPV4_GTPU)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_DW); + else if (FDIR_CHK_FTYPE(IPV6_GTPU)) + FDIR_SET_FTYPE(IPV6_GTPU_EH_DW); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_GTPU)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_EH_DW); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV6_GTPU)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_EH_DW); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4_GTPU)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_EH_DW); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV6_GTPU)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_EH_DW); if (hdr->field_selector) input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK; - conf->ttype = 
ICE_FDIR_TUNNEL_TYPE_GTPU_EH; + if (conf->ttype != ICE_FDIR_TUNNEL_TYPE_GTPOGRE) + FDIR_SET_TTYPE(GTPU_EH); break; case VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP: rawh = (u8 *)hdr->buffer; - if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_GTPU) - input->flow_type = - ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_UP; - else - input->flow_type = - ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH_UP; + if (FDIR_CHK_FTYPE(IPV4_GTPU)) + FDIR_SET_FTYPE(IPV4_GTPU_EH_UP); + else if (FDIR_CHK_FTYPE(IPV6_GTPU)) + FDIR_SET_FTYPE(IPV6_GTPU_EH_UP); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV4_GTPU)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_EH_UP); + else if (FDIR_CHK_FTYPE(IPV4_GRE_IPV6_GTPU)) + FDIR_SET_FTYPE(IPV4_GRE_IPV4_GTPU_EH_UP); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV4_GTPU)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_EH_UP); + else if (FDIR_CHK_FTYPE(IPV6_GRE_IPV6_GTPU)) + FDIR_SET_FTYPE(IPV6_GRE_IPV4_GTPU_EH_UP); if (hdr->field_selector) input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK; - conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH; + if (conf->ttype != ICE_FDIR_TUNNEL_TYPE_GTPOGRE) + FDIR_SET_TTYPE(GTPU_EH); break; case VIRTCHNL_PROTO_HDR_ECPRI: rawh = (u8 *)hdr->buffer; @@ -1225,6 +2913,64 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, input->ecpri_data.pc_id = *(__force __be16 *)(&rawh[4]); break; + case VIRTCHNL_PROTO_HDR_L2TPV2: + rawh = (u8 *)hdr->buffer; + if (FDIR_CHK_FTYPE(IPV4_UDP)) + FDIR_SET_FTYPE(IPV4_L2TPV2); + else if (FDIR_CHK_FTYPE(IPV6_UDP)) + FDIR_SET_FTYPE(IPV6_L2TPV2); + + pos = 0; + input->l2tpv2_data.flags_version = + *(__force __be16 *)(&rawh[pos]); + pos += 2; + + flags_version = + be16_to_cpu(input->l2tpv2_data.flags_version); + if (flags_version & ICE_L2TPV2_FLAGS_CTRL) { + if (FDIR_CHK_FTYPE(IPV4_L2TPV2)) + FDIR_SET_FTYPE(IPV4_L2TPV2_CONTROL); + else if (FDIR_CHK_FTYPE(IPV6_L2TPV2)) + FDIR_SET_FTYPE(IPV6_L2TPV2_CONTROL); + } + + if (flags_version & ICE_L2TPV2_FLAGS_LEN) { + input->l2tpv2_data.length = + *(__force __be16 *)(&rawh[pos]); + pos += 2; + } + + input->l2tpv2_data.tunnel_id = + *(__force __be16 *)(&rawh[pos]); + pos += 2; + + input->l2tpv2_data.session_id = + *(__force __be16 *)(&rawh[pos]); + pos += 2; + + if (flags_version & ICE_L2TPV2_FLAGS_SEQ) { + input->l2tpv2_data.ns = + *(__force __be16 *)(&rawh[pos]); + pos += 2; + + input->l2tpv2_data.nr = + *(__force __be16 *)(&rawh[pos]); + pos += 2; + } + /* get l2tpv2 offset */ + if (flags_version & ICE_L2TPV2_FLAGS_OFF) { + input->l2tpv2_data.offset_size = + *(__force __be16 *)(&rawh[pos]); + } + + conf->ttype = ICE_FDIR_TUNNEL_TYPE_L2TPV2; + break; + case VIRTCHNL_PROTO_HDR_PPP: + if (FDIR_CHK_FTYPE(IPV4_L2TPV2)) + FDIR_SET_FTYPE(IPV4_L2TPV2_PPP); + else if (FDIR_CHK_FTYPE(IPV6_L2TPV2)) + FDIR_SET_FTYPE(IPV6_L2TPV2_PPP); + break; default: dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n", hdr->type, vf->vf_id); @@ -1332,8 +3078,10 @@ ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; int ret; - if (!ice_vc_validate_pattern(vf, proto)) - return -EINVAL; + /* For Protocol Agnostic Flow Offloading case only */ + if (!ice_vc_fdir_is_raw_flow(proto)) + if (!ice_vc_validate_pattern(vf, proto)) + return -EINVAL; ret = ice_vc_fdir_parse_pattern(vf, fltr, conf); if (ret) @@ -1362,30 +3110,7 @@ ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a, if (conf_a->ttype != conf_b->ttype) return false; - if (a->flow_type != b->flow_type) - return false; - if (memcmp(&a->ip, &b->ip, sizeof(a->ip))) - return false; - if (memcmp(&a->mask, &b->mask, sizeof(a->mask))) - return false; - if 
(memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data))) - return false; - if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask))) - return false; - if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data))) - return false; - if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask))) - return false; - if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data))) - return false; - if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask))) - return false; - if (memcmp(&a->ecpri_data, &b->ecpri_data, sizeof(a->ecpri_data))) - return false; - if (memcmp(&a->ecpri_mask, &b->ecpri_mask, sizeof(a->ecpri_mask))) - return false; - - return true; + return ice_fdir_comp_rules_extended(a, b); } /** @@ -1491,6 +3216,64 @@ static void ice_vc_fdir_flush_entry(struct ice_vf *vf) } } +/** + * ice_vc_fdir_add_del_raw - write raw flow filter rule into hardware + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @add: true implies add rule, false implies del rules + * + * Return: 0 on success, and other on error. + */ +static int ice_vc_fdir_add_del_raw(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + bool add) +{ + struct ice_fdir_fltr *input = &conf->input; + struct ice_vsi *vsi, *ctrl_vsi; + struct ice_fltr_desc desc; + struct device *dev; + struct ice_pf *pf; + struct ice_hw *hw; + int ret; + u8 *pkt; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id); + return -EINVAL; + } + + input->dest_vsi = vsi->idx; + input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW; + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) { + dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id); + return -EINVAL; + } + + pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL); + if (!pkt) + return -ENOMEM; + + memcpy(pkt, conf->pkt_buf, conf->pkt_len); + + ice_fdir_get_prgm_desc(hw, input, &desc, add); + + ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); + if (ret) + goto err_free_pkt; + + return 0; + +err_free_pkt: + devm_kfree(dev, pkt); + return ret; +} + /** * ice_vc_fdir_write_fltr - write filter rule into hardware * @vf: pointer to the VF info @@ -1508,7 +3291,6 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf, struct ice_fdir_fltr *input = &conf->input; struct ice_vsi *vsi, *ctrl_vsi; struct ice_fltr_desc desc; - enum ice_status status; struct device *dev; struct ice_pf *pf; struct ice_hw *hw; @@ -1538,8 +3320,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf, return -ENOMEM; ice_fdir_get_prgm_desc(hw, input, &desc, add); - status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); - ret = ice_status_to_errno(status); + ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); if (ret) { dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", vf->vf_id, input->flow_type); @@ -1603,15 +3384,16 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, union ice_32b_rx_flex_desc *rx_desc) { struct ice_pf *pf = ctrl_vsi->back; + struct ice_vf *vf = ctrl_vsi->vf; struct ice_vf_fdir_ctx *ctx_done; struct ice_vf_fdir_ctx *ctx_irq; struct ice_vf_fdir *fdir; unsigned long flags; struct device *dev; - struct ice_vf *vf; int ret; - vf = &pf->vf[ctrl_vsi->vf_id]; + if (WARN_ON(!vf)) + return; fdir = &vf->fdir; ctx_done = &fdir->ctx_done; @@ -1658,11 +3440,16 @@ static void ice_vf_fdir_dump_info(struct ice_vf *vf) hw = &pf->hw; dev = ice_pf_to_dev(pf); vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + dev_dbg(dev, "VF 
%d: invalid VSI pointer\n", vf->vf_id); + return; + } + vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx); fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num)); fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num)); - dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x", + dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n", vf->vf_id, (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S, (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S, @@ -1752,7 +3539,12 @@ err_exit: static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype) { - return ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI; + return (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI || + ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER); } /** @@ -1894,15 +3686,16 @@ err_exit: */ void ice_flush_fdir_ctx(struct ice_pf *pf) { - int i; + struct ice_vf *vf; + unsigned int bkt; if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state)) return; - ice_for_each_vf(pf, i) { + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) { struct device *dev = ice_pf_to_dev(pf); enum virtchnl_fdir_prgm_status status; - struct ice_vf *vf = &pf->vf[i]; struct ice_vf_fdir_ctx *ctx; unsigned long flags; int ret; @@ -1956,6 +3749,7 @@ err_exit: ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); } + mutex_unlock(&pf->vfs.table_lock); } /** @@ -2015,6 +3809,167 @@ static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf) spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); } +/** + * ice_vc_parser_fv_check_diff - check two parsed FDIR profile fv context + * @fv_a: struct of parsed FDIR profile field vector + * @fv_b: struct of parsed FDIR profile field vector + * + * Check if the two parsed FDIR profile field vector context are different, + * including proto_id, offset and mask. + * + * Return: true on differnet, false on otherwise. + */ +static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a, + struct ice_parser_fv *fv_b) +{ + return (fv_a->proto_id != fv_b->proto_id || + fv_a->offset != fv_b->offset || + fv_a->msk != fv_b->msk); +} + +/** + * ice_vc_parser_fv_save - save parsed FDIR profile fv context + * @fv: struct of parsed FDIR profile field vector + * @fv_src: parsed FDIR profile field vector context to save + * + * Save the parsed FDIR profile field vector context, including proto_id, + * offset and mask. + */ +static void ice_vc_parser_fv_save(struct ice_parser_fv *fv, + struct ice_parser_fv *fv_src) +{ + fv->proto_id = fv_src->proto_id; + fv->offset = fv_src->offset; + fv->msk = fv_src->msk; +} + +/** + * ice_vc_add_fdir_raw - add a raw FDIR filter for VF + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER + * @len: length of the stat + * + * Return: 0 on success, and other on error. 
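+ *
+ * Flow: look up the packet type group (PTG) of the parsed profile, reuse the
+ * HW profile already cached in vf->fdir_prof_info when its field vectors
+ * match, otherwise program a new one via ice_flow_set_hw_prof(), then insert
+ * the filter entry, arm the IRQ context and write the rule with
+ * ice_vc_fdir_add_del_raw().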
+ */ +static int +ice_vc_add_fdir_raw(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + struct virtchnl_fdir_add *stat, int len) +{ + struct ice_vsi *vf_vsi, *ctrl_vsi; + enum virtchnl_status_code v_ret; + struct ice_fdir_prof_info *pi; + struct ice_pf *pf = vf->pf; + int ret, ptg, id, i; + struct device *dev; + struct ice_hw *hw; + bool fv_found; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + + id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX); + ptg = hw->blk[ICE_BLK_FD].xlt1.t[id]; + + v_ret = VIRTCHNL_STATUS_SUCCESS; + vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id); + goto err_exit; + } + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n", + vf->vf_id); + goto err_exit; + } + + fv_found = false; + + /* Check if profile info already existed, then update the counter */ + pi = &vf->fdir_prof_info[ptg]; + if (pi->fdir_active_cnt != 0) { + for (i = 0; i < ICE_MAX_FV_WORDS; i++) + if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i], + &conf->prof->fv[i])) + break; + if (i == ICE_MAX_FV_WORDS) { + fv_found = true; + pi->fdir_active_cnt++; + } + } + + /* HW profile setting is only required for the first time */ + if (!fv_found) { + ret = ice_flow_set_hw_prof(hw, vf_vsi->idx, + ctrl_vsi->idx, conf->prof, + ICE_BLK_FD); + + if (ret) + goto err_free_conf; + } + + ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_dbg(dev, "VF %d: insert FDIR list failed\n", + vf->vf_id); + goto err_free_conf; + } + + ret = ice_vc_fdir_set_irq_ctx(vf, conf, + VIRTCHNL_OP_ADD_FDIR_FILTER); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_dbg(dev, "VF %d: set FDIR context failed\n", + vf->vf_id); + goto err_rem_entry; + } + + ret = ice_vc_fdir_add_del_raw(vf, conf, true); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + ice_dev_err_errno(dev, ret, "VF %d: adding FDIR raw flow rule failed", + vf->vf_id); + goto err_clr_irq; + } + + /* Save parsed profile fv info of the FDIR rule for the first time */ + if (!fv_found) { + for (i = 0; i < conf->prof->fv_num; i++) + ice_vc_parser_fv_save(&pi->prof.fv[i], + &conf->prof->fv[i]); + pi->fdir_active_cnt = 1; + } + + return 0; + +err_clr_irq: + ice_vc_fdir_clear_irq_ctx(vf); +err_rem_entry: + ice_vc_fdir_remove_entry(vf, conf, conf->flow_id); +err_free_conf: + if (conf->parser_ena) + conf->parser_ena = false; + kfree(conf->prof); + kfree(conf->pkt_buf); + kfree(conf); +err_exit: + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret, + (u8 *)stat, len); + kfree(stat); + return ret; +} + /** * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer * @vf: pointer to the VF info @@ -2046,8 +4001,8 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) ret = ice_vf_start_ctrl_vsi(vf); if (ret && (ret != -EEXIST)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n", - vf->vf_id, ret); + ice_dev_err_errno(dev, ret, "Init FDIR for VF %d failed", + vf->vf_id); goto err_exit; } @@ -2074,6 +4029,10 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) goto 
err_free_conf; } + /* For Protocol Agnostic Flow Offloading case only */ + if (conf->parser_ena) + return ice_vc_add_fdir_raw(vf, conf, stat, len); + if (fltr->validate_only) { v_ret = VIRTCHNL_STATUS_SUCCESS; stat->status = VIRTCHNL_FDIR_SUCCESS; @@ -2088,8 +4047,9 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) if (ret) { v_ret = VIRTCHNL_STATUS_SUCCESS; stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT; - dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n", - vf->vf_id, ret); + ice_dev_err_errno(dev, ret, + "VF %d: FDIR input set configure failed", + vf->vf_id); goto err_free_conf; } @@ -2122,8 +4082,8 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) if (ret) { v_ret = VIRTCHNL_STATUS_SUCCESS; stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; - dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n", - vf->vf_id, ret); + ice_dev_err_errno(dev, ret, "VF %d: writing FDIR rule failed", + vf->vf_id); goto err_clr_irq; } @@ -2144,6 +4104,93 @@ err_exit: return ret; } +/** + * ice_vc_del_fdir_raw - delete a raw FDIR filter for VF + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER + * @len: length of the stat + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_del_fdir_raw(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + struct virtchnl_fdir_del *stat, int len) +{ + struct ice_vsi *vf_vsi, *ctrl_vsi; + enum ice_block blk = ICE_BLK_FD; + enum virtchnl_status_code v_ret; + struct ice_fdir_prof_info *pi; + struct ice_pf *pf = vf->pf; + struct device *dev; + struct ice_hw *hw; + u16 vsi_num; + int ptg; + int ret; + int id; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + + id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX); + ptg = hw->blk[ICE_BLK_FD].xlt1.t[id]; + + ret = ice_vc_fdir_add_del_raw(vf, conf, false); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + ice_dev_err_errno(dev, ret, "VF %d: deleting FDIR raw flow rule failed", + vf->vf_id); + goto err_del_tmr; + } + + vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id); + goto err_exit; + } + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n", + vf->vf_id); + goto err_exit; + } + + pi = &vf->fdir_prof_info[ptg]; + if (pi->fdir_active_cnt != 0) { + pi->fdir_active_cnt--; + /* Remove the profile id flow if no active FDIR rule left */ + if (!pi->fdir_active_cnt) { + vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx); + ice_rem_prof_id_flow(hw, blk, vsi_num, id); + + vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx); + ice_rem_prof_id_flow(hw, blk, vsi_num, id); + } + } + + kfree(conf->prof); + kfree(conf->pkt_buf); + conf->parser_ena = false; + kfree(stat); + + return ret; + +err_del_tmr: + ice_vc_fdir_clear_irq_ctx(vf); +err_exit: + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret, + (u8 *)stat, len); + kfree(stat); + return ret; +} + /** * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer * @vf: pointer to the VF info @@ -2209,13 +4256,17 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg) goto err_exit; } + /* For Protocol Agnostic Flow Offloading case only */ + if (conf->parser_ena) + return 
ice_vc_del_fdir_raw(vf, conf, stat, len); + is_tun = ice_fdir_is_tunnel(conf->ttype); ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun); if (ret) { v_ret = VIRTCHNL_STATUS_SUCCESS; stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; - dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n", - vf->vf_id, ret); + ice_dev_err_errno(dev, ret, "VF %d: writing FDIR rule failed", + vf->vf_id); goto err_del_tmr; } @@ -2252,6 +4303,7 @@ void ice_vf_fdir_init(struct ice_vf *vf) spin_lock_init(&fdir->ctx_lock); fdir->ctx_irq.flags = 0; fdir->ctx_done.flags = 0; + ice_vc_fdir_reset_cnt_all(fdir); } /** diff --git a/drivers/thirdparty/ice/ice_virtchnl_fdir.h b/drivers/thirdparty/ice/ice_virtchnl_fdir.h index eaf5d8359c70..7b3f08ed8f66 100644 --- a/drivers/thirdparty/ice/ice_virtchnl_fdir.h +++ b/drivers/thirdparty/ice/ice_virtchnl_fdir.h @@ -6,6 +6,7 @@ struct ice_vf; struct ice_pf; +struct ice_vsi; enum ice_fdir_ctx_stat { ICE_FDIR_CTX_READY, @@ -50,8 +51,8 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, union ice_32b_rx_flex_desc *rx_desc); void ice_flush_fdir_ctx(struct ice_pf *pf); #else -static inline -void ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, union ice_32b_rx_flex_desc *rx_desc) { } +static inline void +ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, union ice_32b_rx_flex_desc *rx_desc) { } static inline void ice_flush_fdir_ctx(struct ice_pf *pf) { } #endif /* CONFIG_PCI_IOV */ #endif /* _ICE_VIRTCHNL_FDIR_H_ */ diff --git a/drivers/thirdparty/ice/ice_virtchnl_fsub.c b/drivers/thirdparty/ice/ice_virtchnl_fsub.c new file mode 100644 index 000000000000..8201966d30ae --- /dev/null +++ b/drivers/thirdparty/ice/ice_virtchnl_fsub.c @@ -0,0 +1,865 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#include "ice.h" +#include "ice_base.h" +#include "ice_lib.h" + +#define to_fltr_conf_from_desc(p) \ + container_of(p, struct ice_flow_sub_conf, fsub_fltr) + +struct ice_flow_sub_fltr { + struct list_head fltr_node; + struct ice_adv_lkup_elem *list; + struct ice_adv_rule_info rule_info; + struct ice_rule_query_data rule_data; +}; + +struct ice_flow_sub_conf { + u32 flow_id; + struct ice_flow_sub_fltr fsub_fltr; +}; + +/** + * ice_vc_fsub_param_check + * @vf: pointer to the VF structure + * @vsi_id: VF relative VSI ID + * + * Check for the valid VSI ID, PF's state and VF's state + * + * Return: 0 on success, and -EINVAL on error. 
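+ * (Depending on which check below fails, -EPERM, -EACCES, -EEXIST or -ENOENT
+ * may be returned instead of -EINVAL.)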
+ */ +static int +ice_vc_fsub_param_check(struct ice_vf *vf, u16 vsi_id) +{ + struct device *dev; + struct ice_pf *pf; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + dev_dbg(dev, "The state is not activeted for VF: %d\n", + vf->vf_id); + return -EPERM; + } + + if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FSUB_PF)) { + dev_dbg(dev, "Invalid VF capability flag for VF: %d\n", + vf->vf_id); + return -EACCES; + } + + if (vsi_id != vf->lan_vsi_num) { + dev_dbg(dev, "Incorrect vsi_id for VF: %d\n", vf->vf_id); + return -EEXIST; + } + + if (!ice_vc_isvalid_vsi_id(vf, vsi_id)) { + dev_dbg(dev, "Invalid vsi_id for VF: %d\n", vf->vf_id); + return -EINVAL; + } + + if (!ice_get_vf_vsi(vf)) { + dev_dbg(dev, "Get vsi failed for VF: %d\n", vf->vf_id); + return -ENOENT; + } + + if (!vf->trusted) { + dev_dbg(dev, "It is not trusted for VF: %d\n", vf->vf_id); + return -EACCES; + } + + return 0; +} + +/** + * is_zero_buffer + * @buffer: pointer to the input buffer + * @size: size of the input buffer + * + * Detect the input buffer if it is zero or not + * + * Returns true if buffer contains only zeroes, false otherwise. + */ +static bool +is_zero_buffer(const u8 *buffer, int size) +{ + int i; + + for (i = 0; i < size; i++) { + if (buffer[i] != 0) + return false; + } + + return true; +} + +/** + * ice_vc_parse_fsub_pattern + * @vf: pointer to the VF info + * @fltr: virtual channel add cmd buffer + * @conf: fsub filter configuration + * @lkups_cnt: num of entries in the fsub lkups array + * + * Parse the virtual channel fsub's pattern and store them into @list + * and @rule_info + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_parse_fsub_pattern(struct ice_vf *vf, + struct virtchnl_flow_sub *fltr, + struct ice_flow_sub_conf *conf, + u16 *lkups_cnt) +{ + struct ice_adv_rule_info *rule_info = &conf->fsub_fltr.rule_info; + struct ice_adv_lkup_elem *list = conf->fsub_fltr.list; + struct virtchnl_proto_hdrs *proto = &fltr->proto_hdrs; + enum ice_sw_tunnel_type tun_type = ICE_NON_TUN; + bool ipv6_valid = false; + bool ipv4_valid = false; + bool udp_valid = false; + bool tcp_valid = false; + struct ice_vsi *vsi; + struct device *dev; + struct ice_pf *pf; + int i, count; + u16 idx = 0; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + + vsi = ice_get_main_vsi(pf); + if (!vsi) { + dev_dbg(dev, "Get main vsi for VF %d failed\n", vf->vf_id); + return -EINVAL; + } + + count = proto->count - VIRTCHNL_MAX_NUM_PROTO_HDRS; + if (count > VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK || count < 0) { + dev_dbg(dev, "Invalid protocol count: 0x%x for VF %d\n", + proto->count, vf->vf_id); + return -EINVAL; + } + + for (i = 0; i < count; i++) { + struct virtchnl_proto_hdr_w_msk *hdr = + &proto->proto_hdr_w_msk[i]; + struct vlan_hdr *vlan_spec, *vlan_mask; + struct ipv6hdr *ip6_spec, *ip6_mask; + struct ethhdr *eth_spec, *eth_mask; + struct tcphdr *tcp_spec, *tcp_mask; + struct udphdr *udp_spec, *udp_mask; + struct iphdr *ip4_spec, *ip4_mask; + + switch (hdr->type) { + case VIRTCHNL_PROTO_HDR_IPV4: + ipv4_valid = true; + break; + case VIRTCHNL_PROTO_HDR_IPV6: + ipv6_valid = true; + break; + case VIRTCHNL_PROTO_HDR_UDP: + udp_valid = true; + break; + case VIRTCHNL_PROTO_HDR_TCP: + tcp_valid = true; + break; + default: + break; + } + + if (is_zero_buffer(hdr->buffer_spec, + sizeof(hdr->buffer_spec)) || + is_zero_buffer(hdr->buffer_mask, + sizeof(hdr->buffer_mask))) { + if (hdr->type == VIRTCHNL_PROTO_HDR_ETH) { + /** + * make sure to include PF's MAC address + * when 
adding FSUB filter + */ + struct ice_ether_hdr *h; + struct ice_ether_hdr *m; + + list[idx].type = ICE_MAC_OFOS; + + h = &list[idx].h_u.eth_hdr; + m = &list[idx].m_u.eth_hdr; + + ether_addr_copy(h->dst_addr, + vsi->netdev->dev_addr); + eth_broadcast_addr(m->dst_addr); + + idx++; + } + + continue; + } + + switch (hdr->type) { + case VIRTCHNL_PROTO_HDR_ETH: + { + struct ice_ether_hdr *h; + struct ice_ether_hdr *m; + + eth_spec = (struct ethhdr *)hdr->buffer_spec; + eth_mask = (struct ethhdr *)hdr->buffer_mask; + + list[idx].type = ICE_MAC_OFOS; + + h = &list[idx].h_u.eth_hdr; + m = &list[idx].m_u.eth_hdr; + if (!is_zero_ether_addr(eth_mask->h_dest)) { + if (!ether_addr_equal(eth_spec->h_dest, vsi->netdev->dev_addr)) + return -EINVAL; + + ether_addr_copy(h->dst_addr, + eth_spec->h_dest); + ether_addr_copy(m->dst_addr, + eth_mask->h_dest); + } else { + /** + * make sure to include PF's MAC address + * when adding FSUB filter + */ + ether_addr_copy(h->dst_addr, + vsi->netdev->dev_addr); + eth_broadcast_addr(m->dst_addr); + } + + if (!is_zero_ether_addr(eth_mask->h_source)) { + ether_addr_copy(h->src_addr, + eth_spec->h_source); + ether_addr_copy(m->src_addr, + eth_mask->h_source); + } + + idx++; + + if (eth_mask->h_proto) { + list[idx].type = ICE_ETYPE_OL; + list[idx].h_u.ethertype.ethtype_id = + eth_spec->h_proto; + list[idx].m_u.ethertype.ethtype_id = + eth_mask->h_proto; + idx++; + } + + break; + } + case VIRTCHNL_PROTO_HDR_IPV4: + { + ip4_spec = (struct iphdr *)hdr->buffer_spec; + ip4_mask = (struct iphdr *)hdr->buffer_mask; + + list[idx].type = ICE_IPV4_OFOS; + + if (ip4_mask->saddr) { + list[idx].h_u.ipv4_hdr.src_addr = + ip4_spec->saddr; + list[idx].m_u.ipv4_hdr.src_addr = + ip4_mask->saddr; + } + + if (ip4_mask->daddr) { + list[idx].h_u.ipv4_hdr.dst_addr = + ip4_spec->daddr; + list[idx].m_u.ipv4_hdr.dst_addr = + ip4_mask->daddr; + } + + if (ip4_mask->ttl) { + list[idx].h_u.ipv4_hdr.time_to_live = + ip4_spec->ttl; + list[idx].m_u.ipv4_hdr.time_to_live = + ip4_mask->ttl; + } + + if (ip4_mask->protocol) { + if ((ip4_spec->protocol & + ip4_mask->protocol) == + ICE_IPV4_PROTO_NVGRE) + tun_type = ICE_SW_TUN_AND_NON_TUN; + + list[idx].h_u.ipv4_hdr.protocol = + ip4_spec->protocol; + list[idx].m_u.ipv4_hdr.protocol = + ip4_mask->protocol; + } + + if (ip4_mask->tos) { + list[idx].h_u.ipv4_hdr.tos = + ip4_spec->tos; + list[idx].m_u.ipv4_hdr.tos = + ip4_mask->tos; + } + + idx++; + + break; + } + case VIRTCHNL_PROTO_HDR_IPV6: + { + struct ice_ipv6_hdr *h; + struct ice_ipv6_hdr *m; + + ip6_spec = (struct ipv6hdr *)hdr->buffer_spec; + ip6_mask = (struct ipv6hdr *)hdr->buffer_mask; + + list[idx].type = ICE_IPV6_OFOS; + + h = &list[idx].h_u.ipv6_hdr; + m = &list[idx].m_u.ipv6_hdr; + + if (!is_zero_buffer(ip6_mask->saddr.s6_addr, + sizeof(ip6_mask->saddr))) { + memcpy(h->src_addr, + ip6_spec->saddr.in6_u.u6_addr8, + sizeof(ip6_spec->saddr)); + memcpy(m->src_addr, + ip6_mask->saddr.in6_u.u6_addr8, + sizeof(ip6_mask->saddr)); + } + + if (!is_zero_buffer(ip6_mask->daddr.s6_addr, + sizeof(ip6_mask->daddr))) { + memcpy(h->dst_addr, + ip6_spec->daddr.in6_u.u6_addr8, + sizeof(ip6_spec->daddr)); + memcpy(m->dst_addr, + ip6_mask->daddr.in6_u.u6_addr8, + sizeof(ip6_mask->daddr)); + } + + if (ip6_mask->nexthdr) { + h->next_hdr = ip6_spec->nexthdr; + m->next_hdr = ip6_mask->nexthdr; + } + + if (ip6_mask->hop_limit) { + h->hop_limit = ip6_spec->hop_limit; + m->hop_limit = ip6_mask->hop_limit; + } + + if (ip6_mask->priority || ip6_mask->flow_lbl[0]) { + struct ice_le_ver_tc_flow vtf_s, vtf_m; + + vtf_s.u.fld.version = 
0; + vtf_s.u.fld.flow_label = 0; + vtf_s.u.fld.tc = + ((u8)(ip6_spec->priority) << 4) | + (ip6_spec->flow_lbl[0] >> 4); + h->be_ver_tc_flow = cpu_to_be32(vtf_s.u.val); + + vtf_m.u.fld.version = 0; + vtf_m.u.fld.flow_label = 0; + vtf_m.u.fld.tc = + ((u8)(ip6_mask->priority) << 4) | + (ip6_mask->flow_lbl[0] >> 4); + m->be_ver_tc_flow = cpu_to_be32(vtf_m.u.val); + } + + idx++; + + break; + } + case VIRTCHNL_PROTO_HDR_UDP: + { + udp_spec = (struct udphdr *)hdr->buffer_spec; + udp_mask = (struct udphdr *)hdr->buffer_mask; + + list[idx].type = ICE_UDP_ILOS; + + if (udp_mask->source) { + list[idx].h_u.l4_hdr.src_port = + udp_spec->source; + list[idx].m_u.l4_hdr.src_port = + udp_mask->source; + } + + if (udp_mask->dest) { + list[idx].h_u.l4_hdr.dst_port = + udp_spec->dest; + list[idx].m_u.l4_hdr.dst_port = + udp_mask->dest; + } + + idx++; + + break; + } + case VIRTCHNL_PROTO_HDR_TCP: + { + tcp_spec = (struct tcphdr *)hdr->buffer_spec; + tcp_mask = (struct tcphdr *)hdr->buffer_mask; + + list[idx].type = ICE_TCP_IL; + + if (tcp_mask->source) { + list[idx].h_u.l4_hdr.src_port = + tcp_spec->source; + list[idx].m_u.l4_hdr.src_port = + tcp_mask->source; + } + + if (tcp_mask->dest) { + list[idx].h_u.l4_hdr.dst_port = + tcp_spec->dest; + list[idx].m_u.l4_hdr.dst_port = + tcp_mask->dest; + } + + idx++; + + break; + } + case VIRTCHNL_PROTO_HDR_S_VLAN: + { + vlan_spec = (struct vlan_hdr *)hdr->buffer_spec; + vlan_mask = (struct vlan_hdr *)hdr->buffer_mask; + + list[idx].type = ICE_VLAN_OFOS; + + if (vlan_mask->h_vlan_TCI) { + list[idx].h_u.vlan_hdr.vlan = + vlan_spec->h_vlan_TCI; + list[idx].m_u.vlan_hdr.vlan = + vlan_mask->h_vlan_TCI; + } + + idx++; + + break; + } + default: + dev_err(dev, "Invalid header type 0x:%x for VF %d\n", + hdr->type, vf->vf_id); + return -EINVAL; + } + } + + if (tun_type == ICE_NON_TUN) { + if (ipv4_valid && tcp_valid) + tun_type = ICE_SW_IPV4_TCP; + else if (ipv4_valid && udp_valid) + tun_type = ICE_SW_IPV4_UDP; + else if (ipv6_valid && tcp_valid) + tun_type = ICE_SW_IPV6_TCP; + else if (ipv6_valid && udp_valid) + tun_type = ICE_SW_IPV6_UDP; + } + + rule_info->tun_type = tun_type; + rule_info->rx = 1; + rule_info->add_dir_lkup = true; + rule_info->priority = ICE_FSUB_PRI_BASE - fltr->priority; + + *lkups_cnt = idx; + + return 0; +} + +/** + * ice_vc_parse_fsub_action + * @vf: pointer to the VF info + * @fltr: virtual channel add cmd buffer + * @conf: fsub filter configuration + * + * Parse the virtual channel fsub's action and store them into @rule_info + * + * Return: 0 on success, and other on error. 
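+ *
+ * The default action forwards to the VF's own VSI (ICE_FWD_TO_VSI);
+ * VIRTCHNL_ACTION_QUEUE and VIRTCHNL_ACTION_Q_REGION switch it to
+ * ICE_FWD_TO_Q/ICE_FWD_TO_QGRP, and the absolute Rx queue is computed from
+ * the PF's Rx queue allocation base (PFLAN_RX_QALLOC) plus the VSI's
+ * rxq_map entry.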
+ */
+static int
+ice_vc_parse_fsub_action(struct ice_vf *vf,
+			 struct virtchnl_flow_sub *fltr,
+			 struct ice_flow_sub_conf *conf)
+{
+	struct ice_adv_rule_info *rule_info = &conf->fsub_fltr.rule_info;
+	struct virtchnl_filter_action_set *as = &fltr->actions;
+	struct device *dev = ice_pf_to_dev(vf->pf);
+	struct ice_vsi *vsi;
+	u32 reg, rxq_id = 0;
+	u16 base_queue = 0;
+	int i;
+
+	vsi = ice_get_vf_vsi(vf);
+	if (!vsi) {
+		dev_dbg(dev, "Get vsi for VF %d failed\n", vf->vf_id);
+		return -EINVAL;
+	}
+
+	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
+		dev_dbg(dev, "Invalid action numbers: 0x%x for VF %d\n",
+			as->count, vf->vf_id);
+		return -EINVAL;
+	}
+
+	/* fsub filter default action is to VF */
+	rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
+
+	for (i = 0; i < as->count; i++) {
+		struct virtchnl_filter_action *action = &as->actions[i];
+
+		switch (action->type) {
+		case VIRTCHNL_ACTION_DROP:
+			break;
+		case VIRTCHNL_ACTION_QUEUE:
+			rule_info->sw_act.fltr_act = ICE_FWD_TO_Q;
+			rxq_id = action->act_conf.queue.index;
+			break;
+		case VIRTCHNL_ACTION_Q_REGION:
+			rule_info->sw_act.fltr_act = ICE_FWD_TO_QGRP;
+			rxq_id = action->act_conf.queue.index;
+			rule_info->sw_act.qgrp_size =
+				action->act_conf.queue.region;
+			break;
+		default:
+			dev_dbg(dev, "Invalid action type 0x:%x for VF %d\n",
+				action->type, vf->vf_id);
+			break;
+		}
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
+	rule_info->sw_act.flag = ICE_FLTR_RX;
+
+	if (rule_info->sw_act.fltr_act != ICE_FWD_TO_VSI) {
+		reg = rd32(&vf->pf->hw, PFLAN_RX_QALLOC);
+		if (reg & PFLAN_RX_QALLOC_VALID_M) {
+			base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+		} else {
+			dev_dbg(dev, "Failed to get Rx base queue index");
+			return -EINVAL;
+		}
+
+		rule_info->sw_act.fwd_id.q_id =
+			vsi->rxq_map[rxq_id] + base_queue;
+	}
+
+	return 0;
+}
+
+/**
+ * ice_vc_fsub_insert_entry
+ * @vf: pointer to the VF info
+ * @conf: SWITCH configuration for each filter
+ * @id: pointer to ID value allocated by driver
+ *
+ * Insert SWITCH conf entry into list and allocate ID for this filter
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fsub_insert_entry(struct ice_vf *vf,
+			 struct ice_flow_sub_conf *conf,
+			 u32 *id)
+{
+	struct ice_flow_sub_fltr *fsub_fltr = &conf->fsub_fltr;
+	int i;
+
+	/* alloc ID corresponding with conf */
+	i = idr_alloc(&vf->fsub.fsub_rule_idr, conf, 0,
+		      ICE_FSUB_MAX_FLTRS, GFP_KERNEL);
+	if (i < 0)
+		return i;
+	*id = i;
+
+	list_add(&fsub_fltr->fltr_node, &vf->fsub.fsub_rule_list);
+	return 0;
+}
+
+/**
+ * ice_vc_fsub_lookup_entry - lookup SWITCH conf entry by ID value
+ * @vf: pointer to the VF info
+ * @id: filter rule's ID
+ *
+ * Return: NULL on error, and the filter configuration pointer on success.
+ */ +static struct ice_flow_sub_conf * +ice_vc_fsub_lookup_entry(struct ice_vf *vf, u32 id) +{ + return idr_find(&vf->fsub.fsub_rule_idr, id); +} + +/** + * ice_vc_fsub_remove_entry - remove SWITCH conf entry by ID value + * @vf: pointer to the VF info + * @conf: SWITCH configuration for each filter + * @id: filter rule's ID + */ +static void +ice_vc_fsub_remove_entry(struct ice_vf *vf, + struct ice_flow_sub_conf *conf, + u32 id) +{ + struct ice_flow_sub_fltr *fsub_fltr = &conf->fsub_fltr; + + idr_remove(&vf->fsub.fsub_rule_idr, id); + list_del(&fsub_fltr->fltr_node); +} + +/** + * ice_vf_fsub_init - init SWITCH resource for VF + * @vf: pointer to the VF info + */ +void ice_vf_fsub_init(struct ice_vf *vf) +{ + struct ice_vf_fsub *fsub = &vf->fsub; + + idr_init(&fsub->fsub_rule_idr); + INIT_LIST_HEAD(&fsub->fsub_rule_list); +} + +/** + * ice_vc_flow_sub_fltr - subscribe flow filter for VF by the msg buffer + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Return: 0 on success, and other on error. + */ +int ice_vc_flow_sub_fltr(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_fsub_prgm_status status = + VIRTCHNL_FSUB_FAILURE_RULE_INVALID; + struct virtchnl_flow_sub *fltr = (struct virtchnl_flow_sub *)msg; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + u32 v_opcode = VIRTCHNL_OP_FLOW_SUBSCRIBE; + struct virtchnl_flow_sub *stat = NULL; + struct ice_flow_sub_conf *conf; + struct ice_adv_lkup_elem *list; + struct device *dev; + struct ice_pf *pf; + u16 lkups_cnt = 0; + int lkups_num = 0; + int ret; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + + ret = ice_vc_fsub_param_check(vf, fltr->vsi_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + conf = kzalloc(sizeof(*conf), GFP_KERNEL); + if (!conf) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + /** + * reserve one more memory slot for ETH + * which may consume 2 lookup items + */ + lkups_num = fltr->proto_hdrs.count - VIRTCHNL_MAX_NUM_PROTO_HDRS + 1; + if (lkups_num < 1) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_dbg(dev, "Invalid fsub filter for VF %d\n", vf->vf_id); + goto err_free_conf; + } + + list = kzalloc(lkups_num * sizeof(*list), GFP_KERNEL); + if (!list) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "Alloc list for VF %d failed\n", vf->vf_id); + goto err_free_conf; + } + conf->fsub_fltr.list = list; + + if (!ice_vc_validate_pattern(vf, &fltr->proto_hdrs)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + status = VIRTCHNL_FSUB_FAILURE_RULE_INVALID; + dev_dbg(dev, "Invalid FSUB filter from VF %d\n", vf->vf_id); + goto err_free; + } + + if (fltr->validate_only) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + status = VIRTCHNL_FSUB_SUCCESS; + goto err_free; + } + + ret = ice_vc_parse_fsub_pattern(vf, fltr, conf, &lkups_cnt); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + status = VIRTCHNL_FSUB_FAILURE_RULE_INVALID; + dev_dbg(dev, "Parse FSUB pattern from VF %d\n", vf->vf_id); + goto err_free; + } + + ret = ice_vc_parse_fsub_action(vf, fltr, conf); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + status = VIRTCHNL_FSUB_FAILURE_RULE_INVALID; + dev_dbg(dev, "Parse FSUB action from VF %d\n", vf->vf_id); + goto err_free; + } + + ret = ice_add_adv_rule(&pf->hw, conf->fsub_fltr.list, lkups_cnt, + &conf->fsub_fltr.rule_info, + &conf->fsub_fltr.rule_data); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + status = 
VIRTCHNL_FSUB_FAILURE_RULE_INVALID; + dev_dbg(dev, + "Subscribe flow rule failed from VF %d, ret = %08x\n", + vf->vf_id, ret); + goto err_free; + } + + ret = ice_vc_fsub_insert_entry(vf, conf, &conf->flow_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + status = VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE; + dev_dbg(dev, "VF %d: insert FSUB list failed\n", vf->vf_id); + goto err_free; + } + + fltr->flow_id = conf->flow_id; + + ret = ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, + (u8 *)fltr, sizeof(*fltr)); + + return ret; + +err_free: + kfree(conf->fsub_fltr.list); +err_free_conf: + kfree(conf); + +err_exit: + stat = kzalloc(sizeof(*stat), GFP_KERNEL); + if (!stat) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id); + ret = ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0); + return ret; + } + + stat->status = status; + ret = ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, + (u8 *)stat, sizeof(*stat)); + + kfree(stat); + return ret; +} + +/** + * ice_vc_flow_unsub_fltr - unsubscribe flow filter for VF by the msg buffer + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Return: 0 on success, and other on error. + */ +int ice_vc_flow_unsub_fltr(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_fsub_prgm_status status = + VIRTCHNL_FSUB_FAILURE_RULE_INVALID; + struct virtchnl_flow_unsub *fltr = (struct virtchnl_flow_unsub *)msg; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + u32 v_opcode = VIRTCHNL_OP_FLOW_UNSUBSCRIBE; + struct virtchnl_flow_unsub *stat = NULL; + struct ice_flow_sub_conf *conf; + struct device *dev; + struct ice_pf *pf; + int ret; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + + ret = ice_vc_fsub_param_check(vf, fltr->vsi_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + conf = ice_vc_fsub_lookup_entry(vf, fltr->flow_id); + if (!conf) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + status = VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST; + dev_dbg(dev, "VF %d: FSUB invalid flow_id:0x%X\n", + vf->vf_id, fltr->flow_id); + goto err_exit; + } + + /* remove advance rule */ + ret = ice_rem_adv_rule_by_id(&pf->hw, &conf->fsub_fltr.rule_data); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + status = VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE; + dev_dbg(dev, "Delete FSUB filter from VF %d\n", vf->vf_id); + goto err_free; + } + + ice_vc_fsub_remove_entry(vf, conf, fltr->flow_id); + + ret = ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, + (u8 *)fltr, sizeof(*fltr)); + + kfree(conf->fsub_fltr.list); + kfree(conf); + return ret; + +err_free: + kfree(conf->fsub_fltr.list); + kfree(conf); + +err_exit: + stat = kzalloc(sizeof(*stat), GFP_KERNEL); + if (!stat) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id); + ret = ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0); + return ret; + } + + stat->status = status; + ret = ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, + (u8 *)stat, sizeof(*stat)); + + kfree(stat); + return ret; +} + +/** + * ice_vf_fsub_exit - destroy SWITCH resource for VF + * @vf: pointer to the VF info + */ +void ice_vf_fsub_exit(struct ice_vf *vf) +{ + struct ice_flow_sub_fltr *desc, *temp; + struct ice_rule_query_data rule; + struct ice_pf *pf = vf->pf; + struct device *dev; + + dev = ice_pf_to_dev(pf); + + list_for_each_entry_safe(desc, temp, &vf->fsub.fsub_rule_list, + fltr_node) { + struct ice_flow_sub_conf *conf = + to_fltr_conf_from_desc(desc); + int ret = 0; + + 
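+		/* remove the advanced switch rule that backs this
+		 * subscription before freeing its bookkeeping
+		 */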
rule.rid = conf->fsub_fltr.rule_data.rid; + rule.rule_id = conf->fsub_fltr.rule_data.rule_id; + rule.vsi_handle = conf->fsub_fltr.rule_data.vsi_handle; + ret = ice_rem_adv_rule_by_id(&pf->hw, &rule); + if (ret) { + dev_dbg(dev, + "VF %d: Failed to unsub flow filter, rule_id = %d\n", + vf->vf_id, rule.rule_id); + } + + list_del(&desc->fltr_node); + kfree(conf->fsub_fltr.list); + kfree(conf); + } + + idr_destroy(&vf->fsub.fsub_rule_idr); +} diff --git a/drivers/thirdparty/ice/ice_virtchnl_fsub.h b/drivers/thirdparty/ice/ice_virtchnl_fsub.h new file mode 100644 index 000000000000..2430a7348ef4 --- /dev/null +++ b/drivers/thirdparty/ice/ice_virtchnl_fsub.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_VIRTCHNL_FSUB_H_ +#define _ICE_VIRTCHNL_FSUB_H_ + +struct ice_vf; +struct ice_pf; +struct ice_vsi; + +#define ICE_IPV4_PROTO_NVGRE 0x002F +#define ICE_FSUB_MAX_FLTRS 16384 +#define ICE_FSUB_PRI_BASE 6 + +/* VF FSUB information structure */ +struct ice_vf_fsub { + struct idr fsub_rule_idr; + struct list_head fsub_rule_list; +}; + +void ice_vf_fsub_init(struct ice_vf *vf); +int ice_vc_flow_sub_fltr(struct ice_vf *vf, u8 *msg); +int ice_vc_flow_unsub_fltr(struct ice_vf *vf, u8 *msg); +void ice_vf_fsub_exit(struct ice_vf *vf); +#endif /* _ICE_VIRTCHNL_FSUB_H_ */ diff --git a/drivers/thirdparty/ice/ice_virtchnl_pf.h b/drivers/thirdparty/ice/ice_virtchnl_pf.h deleted file mode 100644 index ba727a4c986e..000000000000 --- a/drivers/thirdparty/ice/ice_virtchnl_pf.h +++ /dev/null @@ -1,560 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (C) 2018-2021, Intel Corporation. */ - -#ifndef _ICE_VIRTCHNL_PF_H_ -#define _ICE_VIRTCHNL_PF_H_ -#include "ice.h" -#include "ice_virtchnl_fdir.h" -#include "ice_dcf.h" -#include "ice_vsi_vlan_ops.h" - -#define ICE_VIRTCHNL_SUPPORTED_QTYPES 2 - -/* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */ -#define ICE_MAX_VLAN_PER_VF 8 -/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for - * broadcast, and 16 for additional unicast/multicast filters - */ -#define ICE_MAX_MACADDR_PER_VF 18 - -/* Malicious Driver Detection */ -#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 -#define ICE_MDD_EVENTS_THRESHOLD 30 - -/* Static VF transaction/status register def */ -#define VF_DEVICE_STATUS 0xAA -#define VF_TRANS_PENDING_M 0x20 - -/* wait defines for polling PF_PCI_CIAD register status */ -#define ICE_PCI_CIAD_WAIT_COUNT 100 -#define ICE_PCI_CIAD_WAIT_DELAY_US 1 - -/* VF resource constraints */ -#define ICE_MAX_VF_COUNT 256 -#define ICE_MAX_QS_PER_VF 256 -/* Maximum number of queue pairs to configure by default for a VF */ -#define ICE_MAX_DFLT_QS_PER_VF 16 -#define ICE_MIN_QS_PER_VF 1 -#define ICE_NONQ_VECS_VF 1 -#define ICE_MAX_SCATTER_QS_PER_VF 16 -#define ICE_MAX_RSS_QS_PER_LARGE_VF 64 -#define ICE_MAX_RSS_QS_PER_VF 16 -#define ICE_NUM_VF_MSIX_MAX 65 -#define ICE_NUM_VF_MSIX_LARGE 33 -#define ICE_NUM_VF_MSIX_MED 17 -#define ICE_NUM_VF_MSIX_SMALL 5 -#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3 -#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) -#define ICE_MAX_VF_RESET_TRIES 40 -#define ICE_MAX_VF_RESET_SLEEP_MS 20 -#define ICE_MAX_IPSEC_CAPABLE_VF_ID 127 - -#define ice_for_each_vf(pf, i) \ - for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++) - -/* Max number of flexible descriptor rxdid */ -#define ICE_FLEX_DESC_RXDID_MAX_NUM 64 - -/* Specific VF states */ -enum ice_vf_states { - ICE_VF_STATE_INIT = 0, /* PF is initializing VF */ - ICE_VF_STATE_ACTIVE, /* VF 
resources are allocated for use */ - ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */ - ICE_VF_STATE_DIS, - ICE_VF_STATE_MC_PROMISC, - ICE_VF_STATE_UC_PROMISC, - ICE_VF_STATES_NBITS -}; - -/* VF capabilities */ -enum ice_virtchnl_cap { - ICE_VIRTCHNL_VF_CAP_L2 = 0, - ICE_VIRTCHNL_VF_CAP_PRIVILEGE, -}; - -/* DDP package type */ -enum ice_pkg_type { - ICE_PKG_TYPE_UNKNOWN = 0, - ICE_PKG_TYPE_OS_DEFAULT, - ICE_PKG_TYPE_COMMS, - ICE_PKG_TYPE_WIRELESS_EDGE, - ICE_PKG_TYPE_GTP_OVER_GRE, - ICE_PKG_TYPE_END, -}; - -/* In ADQ, max 4 VSI's can be allocated per VF including primary VF VSI. - * These variables are used to store indices, ID's and number of queues - * for each VSI including that of primary VF VSI. Each Traffic class is - * termed as channel and each channel can in-turn have 4 queues which - * means max 16 queues overall per VF. - */ -struct ice_channel_vf { - u16 vsi_idx; /* index in PF struct for all channel VSIs */ - u16 vsi_num; /* HW (absolute) index of this VSI */ - u16 num_qps; /* number of queue pairs requested by user */ - u16 offset; - u64 max_tx_rate; /* Tx rate limiting for channels */ - - /* type of filter: dest/src/dest+src port */ - u32 fltr_type; -}; - -struct ice_time_mac { - unsigned long time_modified; - u8 addr[ETH_ALEN]; -}; - -/* VF MDD events print structure */ -struct ice_mdd_vf_events { - u16 count; /* total count of Rx|Tx events */ - /* count number of the last printed event */ - u16 last_printed; -}; - -/* The VF VLAN information controlled by DCF */ -struct ice_dcf_vlan_info { - struct ice_vlan outer_port_vlan; - u16 outer_stripping_tpid; - u8 outer_stripping_ena:1; - u8 applying:1; -}; - -#define ICE_HASH_IP_CTX_IP 0 -#define ICE_HASH_IP_CTX_IP_ESP 1 -#define ICE_HASH_IP_CTX_IP_UDP_ESP 2 -#define ICE_HASH_IP_CTX_IP_AH 3 -#define ICE_HASH_IP_CTX_IP_L2TPV3 4 -#define ICE_HASH_IP_CTX_IP_PFCP 5 -#define ICE_HASH_IP_CTX_IP_UDP 6 -#define ICE_HASH_IP_CTX_IP_TCP 7 -#define ICE_HASH_IP_CTX_IP_SCTP 8 -#define ICE_HASH_IP_CTX_MAX 9 - -struct ice_vf_hash_ip_ctx { - struct ice_rss_hash_cfg ctx[ICE_HASH_IP_CTX_MAX]; -}; - -#define ICE_HASH_GTPU_CTX_EH_IP 0 -#define ICE_HASH_GTPU_CTX_EH_IP_UDP 1 -#define ICE_HASH_GTPU_CTX_EH_IP_TCP 2 -#define ICE_HASH_GTPU_CTX_UP_IP 3 -#define ICE_HASH_GTPU_CTX_UP_IP_UDP 4 -#define ICE_HASH_GTPU_CTX_UP_IP_TCP 5 -#define ICE_HASH_GTPU_CTX_DW_IP 6 -#define ICE_HASH_GTPU_CTX_DW_IP_UDP 7 -#define ICE_HASH_GTPU_CTX_DW_IP_TCP 8 -#define ICE_HASH_GTPU_CTX_MAX 9 - -struct ice_vf_hash_gtpu_ctx { - struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX]; -}; - -struct ice_vf_hash_ctx { - struct ice_vf_hash_ip_ctx v4; - struct ice_vf_hash_ip_ctx v6; - struct ice_vf_hash_gtpu_ctx ipv4; - struct ice_vf_hash_gtpu_ctx ipv6; -}; - -struct ice_vf; - -struct ice_vc_vf_ops { - int (*get_ver_msg)(struct ice_vf *vf, u8 *msg); - int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg); - void (*reset_vf)(struct ice_vf *vf); - int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg); - int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg); - int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg); - int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg); - int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg); - int (*request_qs_msg)(struct ice_vf *vf, u8 *msg); - int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg); - int (*config_rss_key)(struct ice_vf *vf, u8 *msg); - int (*config_rss_lut)(struct ice_vf *vf, u8 *msg); - int (*get_stats_msg)(struct ice_vf *vf, u8 *msg); - int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg); - int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg); - int 
(*remove_vlan_msg)(struct ice_vf *vf, u8 *msg); - int (*query_rxdid)(struct ice_vf *vf); - int (*get_rss_hena)(struct ice_vf *vf); - int (*set_rss_hena_msg)(struct ice_vf *vf, u8 *msg); - int (*ena_vlan_stripping)(struct ice_vf *vf); - int (*dis_vlan_stripping)(struct ice_vf *vf); -#ifdef HAVE_TC_SETUP_CLSFLOWER - int (*add_qch_msg)(struct ice_vf *vf, u8 *msg); - int (*add_switch_filter_msg)(struct ice_vf *vf, u8 *msg); - int (*del_switch_filter_msg)(struct ice_vf *vf, u8 *msg); - int (*del_qch_msg)(struct ice_vf *vf, u8 *msg); -#endif /* HAVE_TC_SETUP_CLSFLOWER */ - int (*rdma_msg)(struct ice_vf *vf, u8 *msg, u16 msglen); - int (*cfg_rdma_irq_map_msg)(struct ice_vf *vf, u8 *msg); - int (*clear_rdma_irq_map)(struct ice_vf *vf); - int (*dcf_vlan_offload_msg)(struct ice_vf *vf, u8 *msg); - int (*dcf_cmd_desc_msg)(struct ice_vf *vf, u8 *msg, u16 msglen); - int (*dcf_cmd_buff_msg)(struct ice_vf *vf, u8 *msg, u16 msglen); - int (*dis_dcf_cap)(struct ice_vf *vf); - int (*dcf_get_vsi_map)(struct ice_vf *vf); - int (*dcf_query_pkg_info)(struct ice_vf *vf); - int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add); - int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg); - int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg); - int (*get_max_rss_qregion)(struct ice_vf *vf); - int (*ena_qs_v2_msg)(struct ice_vf *vf, u8 *msg); - int (*dis_qs_v2_msg)(struct ice_vf *vf, u8 *msg); - int (*map_q_vector_msg)(struct ice_vf *vf, u8 *msg); - int (*get_offload_vlan_v2_caps)(struct ice_vf *vf); - int (*add_vlan_v2_msg)(struct ice_vf *vf, u8 *msg); - int (*remove_vlan_v2_msg)(struct ice_vf *vf, u8 *msg); - int (*ena_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg); - int (*dis_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg); - int (*ena_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg); - int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg); -}; - -/* VF information structure */ -struct ice_vf { - struct ice_pf *pf; - - - u16 vf_id; /* VF ID in the PF space */ - u16 lan_vsi_idx; /* index into PF struct */ - u16 ctrl_vsi_idx; - struct ice_vf_fdir fdir; - struct ice_vf_hash_ctx hash_ctx; - /* first vector index of this VF in the PF space */ - int first_vector_idx; - struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ - struct virtchnl_version_info vf_ver; - u32 driver_caps; /* reported by VF driver */ - u16 stag; /* VF Port Extender (PE) stag if used */ - struct virtchnl_ether_addr dev_lan_addr; - struct virtchnl_ether_addr hw_lan_addr; - struct ice_time_mac legacy_last_added_umac; - DECLARE_BITMAP(txq_ena, ICE_MAX_QS_PER_VF); - DECLARE_BITMAP(rxq_ena, ICE_MAX_QS_PER_VF); - struct ice_vlan port_vlan_info; /* Port VLAN ID, QoS, and TPID */ - struct virtchnl_vlan_caps vlan_v2_caps; - struct ice_dcf_vlan_info dcf_vlan_info; - u8 pf_set_mac:1; /* VF MAC address set by VMM admin */ - u8 trusted:1; - u8 spoofchk:1; -#ifdef HAVE_NDO_SET_VF_LINK_STATE - u8 link_forced:1; - u8 link_up:1; /* only valid if VF link is forced */ -#endif - /* VSI indices - actual VSI pointers are maintained in the PF structure - * When assigned, these will be non-zero, because VSI 0 is always - * the main LAN VSI for the PF. 
- */ - u16 lan_vsi_num; /* ID as used by firmware */ - unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */ - unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */ - DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ - - u64 num_inval_msgs; /* number of continuous invalid msgs */ - u64 num_valid_msgs; /* number of valid msgs detected */ - unsigned long vf_caps; /* VF's adv. capabilities */ - u16 num_req_qs; /* num of queue pairs requested by VF */ - u16 num_mac; - u16 num_vf_qs; /* num of queue configured per VF */ - u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */ -#define ICE_INNER_VLAN_STRIP_ENA BIT(0) -#define ICE_OUTER_VLAN_STRIP_ENA BIT(1) - /* ADQ related variables */ - u8 adq_enabled; /* flag to enable ADQ */ - u8 adq_fltr_ena; /* flag to denote that ADQ filters are applied */ - u8 num_tc; - u16 num_dmac_chnl_fltrs; - struct ice_channel_vf ch[VIRTCHNL_MAX_ADQ_V2_CHANNELS]; - struct hlist_head tc_flower_fltr_list; - struct ice_mdd_vf_events mdd_rx_events; - struct ice_mdd_vf_events mdd_tx_events; - struct ice_repr *repr; - DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); - struct ice_vc_vf_ops vc_ops; - -#if IS_ENABLED(CONFIG_NET_DEVLINK) - /* devlink port data */ - struct devlink_port devlink_port; -#endif /* CONFIG_NET_DEVLINK */ -}; - -/** - * ice_vc_get_max_chnl_tc_allowed - * @vf: pointer to the VF info - * - * This function returns max channel TC allowed depends upon "driver_caps" - */ -static inline u32 ice_vc_get_max_chnl_tc_allowed(struct ice_vf *vf) -{ - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ_V2) - return VIRTCHNL_MAX_ADQ_V2_CHANNELS; - else - return VIRTCHNL_MAX_ADQ_CHANNELS; -} - -/** - * ice_vf_chnl_dmac_fltr_cnt - number of dmac based channel filters - * @vf: pointer to the VF info - */ -static inline u16 ice_vf_chnl_dmac_fltr_cnt(struct ice_vf *vf) -{ - return vf->num_dmac_chnl_fltrs; -} - - -#ifdef CONFIG_PCI_IOV -void ice_dump_all_vfs(struct ice_pf *pf); -struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf); -void ice_process_vflr_event(struct ice_pf *pf); -int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); -int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); -int -ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); - -void ice_free_vfs(struct ice_pf *pf); -void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); - -/* VF configuration related iplink handlers */ -void ice_vc_notify_link_state(struct ice_pf *pf); -void ice_vc_notify_reset(struct ice_pf *pf); -void ice_vc_notify_vf_link_state(struct ice_vf *vf); -void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops); -void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops); -bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); -bool ice_reset_vf(struct ice_vf *vf, bool is_vflr); -void ice_restore_all_vfs_msi_state(struct pci_dev *pdev); -bool -ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, - u16 num_msg_proc, u16 num_msg_pending); - - -#ifdef IFLA_VF_VLAN_INFO_MAX -int -ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, - __be16 vlan_proto); -#else -int -ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos); -#endif - -#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -int -ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, - int max_tx_rate); -#else -int ice_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); -#endif - -int ice_set_vf_trust(struct net_device *netdev, int vf_id, 
bool trusted); - -#ifdef HAVE_NDO_SET_VF_LINK_STATE -int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); -#endif - -int ice_check_vf_ready_for_cfg(struct ice_vf *vf); - -int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); - -int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector, - u8 tc); - -void ice_set_vf_state_qs_dis(struct ice_vf *vf); -#ifdef HAVE_VF_STATS -int -ice_get_vf_stats(struct net_device *netdev, int vf_id, - struct ifla_vf_stats *vf_stats); -#endif /* HAVE_VF_STATS */ -bool ice_is_any_vf_in_promisc(struct ice_pf *pf); -void -ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); -void ice_print_vfs_mdd_events(struct ice_pf *pf); -void ice_print_vf_rx_mdd_event(struct ice_vf *vf); -enum ice_pkg_type ice_pkg_name_to_type(struct ice_hw *hw); -bool ice_vc_validate_pattern(struct ice_vf *vf, - struct virtchnl_proto_hdrs *proto); -struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf); -int -ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, - enum virtchnl_status_code v_retval, u8 *msg, u16 msglen); -bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id); -bool ice_vf_is_port_vlan_ena(struct ice_vf *vf); -#else /* CONFIG_PCI_IOV */ -#if IS_ENABLED(CONFIG_NET_DEVLINK) -static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) -{ - return NULL; -} -#endif /* CONFIG_NET_DEVLINK */ -static inline void ice_dump_all_vfs(struct ice_pf *pf) { } -static inline void ice_process_vflr_event(struct ice_pf *pf) { } -static inline void ice_free_vfs(struct ice_pf *pf) { } -static inline -void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { } -static inline void ice_vc_notify_link_state(struct ice_pf *pf) { } -static inline void ice_vc_notify_reset(struct ice_pf *pf) { } -static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { } -static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { } -static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf) -{ - return -EOPNOTSUPP; -} -static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { } -static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { } -static inline -void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { } -static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { } -static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { } -static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { } -static inline bool -ice_is_malicious_vf(struct ice_pf __always_unused *pf, - struct ice_rq_event_info __always_unused *event, - u16 __always_unused num_msg_proc, - u16 __always_unused num_msg_pending) -{ - return false; -} - -static inline bool -ice_reset_all_vfs(struct ice_pf __always_unused *pf, - bool __always_unused is_vflr) -{ - return true; -} - -static inline bool -ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr) -{ - return true; -} - -static inline int -ice_sriov_configure(struct pci_dev __always_unused *pdev, - int __always_unused num_vfs) -{ - return -EOPNOTSUPP; -} - -static inline int -ice_set_vf_mac(struct net_device __always_unused *netdev, - int __always_unused vf_id, u8 __always_unused *mac) -{ - return -EOPNOTSUPP; -} - -static inline int -ice_get_vf_cfg(struct net_device __always_unused *netdev, - int __always_unused vf_id, - struct ifla_vf_info __always_unused *ivi) -{ - return -EOPNOTSUPP; -} - -#ifdef HAVE_NDO_SET_VF_TRUST -static inline int 
-ice_set_vf_trust(struct net_device __always_unused *netdev, - int __always_unused vf_id, bool __always_unused trusted) -{ - return -EOPNOTSUPP; -} -#endif /* HAVE_NDO_SET_VF_TRUST */ - -#ifdef IFLA_VF_VLAN_INFO_MAX -static inline int -ice_set_vf_port_vlan(struct net_device __always_unused *netdev, - int __always_unused vf_id, u16 __always_unused vid, - u8 __always_unused qos, __be16 __always_unused v_proto) -{ - return -EOPNOTSUPP; -} -#else -static inline int -ice_set_vf_port_vlan(struct net_device __always_unused *netdev, - int __always_unused vf_id, u16 __always_unused vid, - u8 __always_unused qos) -{ - return -EOPNOTSUPP; -} -#endif /* IFLA_VF_VLAN_INFO_MAX */ - -static inline int -ice_set_vf_spoofchk(struct net_device __always_unused *netdev, - int __always_unused vf_id, bool __always_unused ena) -{ - return -EOPNOTSUPP; -} - -#ifdef HAVE_NDO_SET_VF_LINK_STATE -static inline int -ice_set_vf_link_state(struct net_device __always_unused *netdev, - int __always_unused vf_id, int __always_unused link_state) -{ - return -EOPNOTSUPP; -} -#endif /* HAVE_NDO_SET_VF_LINK_STATE */ - -#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -static inline int -ice_set_vf_bw(struct net_device __always_unused *netdev, - int __always_unused vf_id, int __always_unused min_tx_rate, - int __always_unused max_tx_rate) -#else -static inline int -ice_set_vf_bw(struct net_device __always_unused *netdev, - int __always_unused vf_id, int __always_unused max_tx_rate) -#endif -{ - return -EOPNOTSUPP; -} - -static inline int -ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf, - struct ice_q_vector __always_unused *q_vector, - u8 __always_unused tc) -{ - return 0; -} - -#ifdef HAVE_VF_STATS -static inline int -ice_get_vf_stats(struct net_device __always_unused *netdev, - int __always_unused vf_id, - struct ifla_vf_stats __always_unused *vf_stats) -{ - return -EOPNOTSUPP; -} -#endif /* HAVE_VF_STATS */ - -static inline bool ice_is_any_vf_in_promisc(struct ice_pf __always_unused *pf) -{ - return false; -} - -static inline enum ice_pkg_type ice_pkg_name_to_type(struct ice_hw *hw) -{ - return ICE_PKG_TYPE_UNKNOWN; -} - -static inline struct ice_vsi * -ice_vf_ctrl_vsi_setup(struct ice_vf __always_unused *vf) -{ - return NULL; -} - -static inline int -ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, - enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) -{ - return 0; -} - -static inline bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) -{ - return 0; -} -static inline bool ice_vf_is_port_vlan_ena(struct ice_vf __always_unused *vf) -{ - return false; -} -#endif /* CONFIG_PCI_IOV */ -#endif /* _ICE_VIRTCHNL_PF_H_ */ diff --git a/drivers/thirdparty/ice/ice_vlan.h b/drivers/thirdparty/ice/ice_vlan.h index 69b78750c1f5..f196a75ed49f 100644 --- a/drivers/thirdparty/ice/ice_vlan.h +++ b/drivers/thirdparty/ice/ice_vlan.h @@ -14,4 +14,6 @@ struct ice_vlan { enum ice_sw_fwd_act_type fwd_act; }; +#define ICE_VLAN(tpid, vid, prio, fwd_action) \ + ((struct ice_vlan){ tpid, vid, prio, fwd_action }) #endif /* _ICE_VLAN_H_ */ diff --git a/drivers/thirdparty/ice/ice_vlan_mode.c b/drivers/thirdparty/ice/ice_vlan_mode.c index 5cddf9ec042f..536270fb1d46 100644 --- a/drivers/thirdparty/ice/ice_vlan_mode.c +++ b/drivers/thirdparty/ice/ice_vlan_mode.c @@ -3,18 +3,19 @@ #include "ice_common.h" +#include "ice_ddp.h" /** * ice_pkg_get_supported_vlan_mode - chk if DDP supports Double VLAN mode (DVM) * @hw: pointer to the HW struct * @dvm: output variable to determine if DDP supports DVM(true) or SVM(false) */ -static enum ice_status 
+static int ice_pkg_get_supported_vlan_mode(struct ice_hw *hw, bool *dvm) { u16 meta_init_size = sizeof(struct ice_meta_init_section); struct ice_meta_init_section *sect; struct ice_buf_build *bld; - enum ice_status status; + int status; /* if anything fails, we assume there is no DVM support */ *dvm = false; @@ -23,7 +24,7 @@ ice_pkg_get_supported_vlan_mode(struct ice_hw *hw, bool *dvm) ICE_SID_RXPARSER_METADATA_INIT, meta_init_size, (void **)§); if (!bld) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* only need to read a single section */ sect->count = cpu_to_le16(1); @@ -59,14 +60,14 @@ ice_pkg_get_supported_vlan_mode(struct ice_hw *hw, bool *dvm) * * Get VLAN Mode Parameters (0x020D) */ -static enum ice_status +static int ice_aq_get_vlan_mode(struct ice_hw *hw, struct ice_aqc_get_vlan_mode *get_params) { struct ice_aq_desc desc; if (!get_params) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vlan_mode_parameters); @@ -89,7 +90,7 @@ ice_aq_get_vlan_mode(struct ice_hw *hw, static bool ice_aq_is_dvm_ena(struct ice_hw *hw) { struct ice_aqc_get_vlan_mode get_params = { 0 }; - enum ice_status status; + int status; status = ice_aq_get_vlan_mode(hw, &get_params); if (status) { @@ -134,7 +135,7 @@ static void ice_cache_vlan_mode(struct ice_hw *hw) */ static bool ice_pkg_supports_dvm(struct ice_hw *hw) { - enum ice_status status; + int status; bool pkg_supports_dvm; status = ice_pkg_get_supported_vlan_mode(hw, &pkg_supports_dvm); @@ -154,7 +155,7 @@ static bool ice_pkg_supports_dvm(struct ice_hw *hw) static bool ice_fw_supports_dvm(struct ice_hw *hw) { struct ice_aqc_get_vlan_mode get_vlan_mode = { 0 }; - enum ice_status status; + int status; /* If firmware returns success, then it supports DVM, else it only * supports SVM @@ -241,13 +242,13 @@ static struct ice_update_recipe_lkup_idx_params ice_dvm_dflt_recipes[] = { * ice_dvm_update_dflt_recipes - update default switch recipes in DVM * @hw: hardware structure used to update the recipes */ -static enum ice_status ice_dvm_update_dflt_recipes(struct ice_hw *hw) +static int ice_dvm_update_dflt_recipes(struct ice_hw *hw) { unsigned long i; for (i = 0; i < ARRAY_SIZE(ice_dvm_dflt_recipes); i++) { struct ice_update_recipe_lkup_idx_params *params; - enum ice_status status; + int status; params = &ice_dvm_dflt_recipes[i]; @@ -271,7 +272,7 @@ static enum ice_status ice_dvm_update_dflt_recipes(struct ice_hw *hw) * * Set VLAN Mode Parameters (0x020C) */ -static enum ice_status +static int ice_aq_set_vlan_mode(struct ice_hw *hw, struct ice_aqc_set_vlan_mode *set_params) { @@ -279,20 +280,20 @@ ice_aq_set_vlan_mode(struct ice_hw *hw, struct ice_aq_desc desc; if (!set_params) - return ICE_ERR_PARAM; + return -EINVAL; if (set_params->l2tag_prio_tagging > ICE_AQ_VLAN_PRIO_TAG_MAX) - return ICE_ERR_PARAM; + return -EINVAL; rdma_packet = set_params->rdma_packet; if (rdma_packet != ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING && rdma_packet != ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING) - return ICE_ERR_PARAM; + return -EINVAL; mng_vlan_prot_id = set_params->mng_vlan_prot_id; if (mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER && mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_vlan_mode_parameters); @@ -306,10 +307,10 @@ ice_aq_set_vlan_mode(struct ice_hw *hw, * ice_set_dvm - sets up software and hardware for double VLAN mode * @hw: pointer to the hardware structure */ -static enum ice_status ice_set_dvm(struct ice_hw 
*hw) +static int ice_set_dvm(struct ice_hw *hw) { struct ice_aqc_set_vlan_mode params = { 0 }; - enum ice_status status; + int status; params.l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG; params.rdma_packet = ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING; @@ -351,10 +352,10 @@ static enum ice_status ice_set_dvm(struct ice_hw *hw) * ice_set_svm - set single VLAN mode * @hw: pointer to the HW structure */ -static enum ice_status ice_set_svm(struct ice_hw *hw) +static int ice_set_svm(struct ice_hw *hw) { struct ice_aqc_set_vlan_mode *set_params; - enum ice_status status; + int status; status = ice_aq_set_port_params(hw->port_info, 0, false, false, false, NULL); if (status) { @@ -365,7 +366,7 @@ static enum ice_status ice_set_svm(struct ice_hw *hw) set_params = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*set_params), GFP_KERNEL); if (!set_params) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* default configuration for SVM configurations */ set_params->l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG; @@ -384,7 +385,7 @@ static enum ice_status ice_set_svm(struct ice_hw *hw) * ice_set_vlan_mode * @hw: pointer to the HW structure */ -enum ice_status ice_set_vlan_mode(struct ice_hw *hw) +int ice_set_vlan_mode(struct ice_hw *hw) { if (!ice_is_dvm_supported(hw)) return 0; diff --git a/drivers/thirdparty/ice/ice_vlan_mode.h b/drivers/thirdparty/ice/ice_vlan_mode.h index 5072529c1e03..25b9553559df 100644 --- a/drivers/thirdparty/ice/ice_vlan_mode.h +++ b/drivers/thirdparty/ice/ice_vlan_mode.h @@ -9,7 +9,7 @@ struct ice_hw; bool ice_is_dvm_ena(struct ice_hw *hw); -enum ice_status ice_set_vlan_mode(struct ice_hw *hw); +int ice_set_vlan_mode(struct ice_hw *hw); void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw); #endif /* _ICE_VLAN_MODE_H */ diff --git a/drivers/thirdparty/ice/ice_vsi_vlan_lib.c b/drivers/thirdparty/ice/ice_vsi_vlan_lib.c index 705e496b7f74..94c1e11b374c 100644 --- a/drivers/thirdparty/ice/ice_vsi_vlan_lib.c +++ b/drivers/thirdparty/ice/ice_vsi_vlan_lib.c @@ -19,7 +19,7 @@ static void print_invalid_tpid(struct ice_vsi *vsi, u16 tpid) * * Return true if the VLAN TPID is valid or if the VLAN TPID is 0 and the VLAN * VID is 0, which allows for non-zero VLAN filters with the specified VLAN TPID - * and untagged VLAN 0 filtersto be added to the prune list respectively. + * and untagged VLAN 0 filters to be added to the prune list respectively. 
*/ static bool validate_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) { @@ -39,22 +39,20 @@ static bool validate_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) */ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) { - enum ice_status status; - int err = 0; + int err; if (!validate_vlan(vsi, vlan)) return -EINVAL; - status = ice_fltr_add_vlan(vsi, vlan); - if (status && status != ICE_ERR_ALREADY_EXISTS) { - err = -ENODEV; - dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %s\n", - vlan->vid, vsi->vsi_num, ice_stat_str(status)); - } else { - vsi->num_vlan++; + err = ice_fltr_add_vlan(vsi, vlan); + if (err && err != -EEXIST) { + dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n", + vlan->vid, vsi->vsi_num, err); + return err; } - return err; + vsi->num_vlan++; + return 0; } /** @@ -65,24 +63,22 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) int ice_vsi_del_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; - int err = 0; + int err; dev = ice_pf_to_dev(pf); if (!validate_vlan(vsi, vlan)) return -EINVAL; - status = ice_fltr_remove_vlan(vsi, vlan); - if (!status) { + err = ice_fltr_remove_vlan(vsi, vlan); + if (!err) vsi->num_vlan--; - } else if (status != ICE_ERR_DOES_NOT_EXIST && - status != ICE_ERR_RESET_ONGOING) { - dev_err(dev, "Error removing VLAN %d on VSI %i error: %s\n", - vlan->vid, vsi->vsi_num, ice_stat_str(status)); - err = ice_status_to_errno(status); - } + else if (err == -ENOENT || err == -EBUSY) + err = 0; + else + dev_err(dev, "Error removing VLAN %d on VSI %i error: %d\n", + vlan->vid, vsi->vsi_num, err); return err; } @@ -95,8 +91,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int err = 0; + int err; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) @@ -114,12 +109,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - err = -EIO; + err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (err) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); goto out; } @@ -138,8 +131,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int err = 0; + int err; /* do not allow modifying VLAN stripping when a port VLAN is configured * on this VSI @@ -167,12 +159,10 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n", - ena, ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - err = -EIO; + err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (err) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n", + ena, err, ice_aq_str(hw->adminq.sq_last_status)); goto out; } @@ -222,8 +212,7 @@ 
static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info) struct ice_hw *hw = &vsi->back->hw; struct ice_aqc_vsi_props *info; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) @@ -240,12 +229,10 @@ static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info) info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | ICE_AQ_VSI_PROP_SW_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } @@ -282,8 +269,8 @@ int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) { struct ice_vsi_ctx *ctxt; - enum ice_status status; struct ice_pf *pf; + int status; if (!vsi) return -EINVAL; @@ -311,10 +298,9 @@ static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL); if (status) { - netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n", + netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n", ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, - ice_stat_str(status), - ice_aq_str(pf->hw.adminq.sq_last_status)); + status, ice_aq_str(pf->hw.adminq.sq_last_status)); goto err_out; } @@ -325,7 +311,7 @@ static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) err_out: kfree(ctxt); - return -EIO; + return status; } int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi) @@ -341,8 +327,7 @@ int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi) static int ice_cfg_vlan_antispoof(struct ice_vsi *vsi, bool enable) { struct ice_vsi_ctx *ctx; - enum ice_status status; - int err = 0; + int err; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -358,15 +343,12 @@ static int ice_cfg_vlan_antispoof(struct ice_vsi *vsi, bool enable) ctx->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); - status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx VLAN anti-spoof %s for VSI %d, error %s\n", - enable ? "ON" : "OFF", vsi->vsi_num, - ice_stat_str(status)); - err = ice_status_to_errno(status); - } else { + err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL); + if (err) + dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx VLAN anti-spoof %s for VSI %d, error %d\n", + enable ? 
"ON" : "OFF", vsi->vsi_num, err); + else vsi->info.sec_flags = ctx->info.sec_flags; - } kfree(ctx); @@ -431,9 +413,8 @@ int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; u8 tag_type; - int err = 0; + int err; /* do not allow modifying VLAN stripping when a port VLAN is configured * on this VSI @@ -459,15 +440,12 @@ int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid) ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) & ICE_AQ_VSI_OUTER_TAG_TYPE_M)); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN stripping failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - err = -EIO; - } else { + err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (err) + dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN stripping failed, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + else vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags; - } kfree(ctxt); return err; @@ -492,8 +470,7 @@ int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int err = 0; + int err; if (vsi->info.port_based_outer_vlan) return 0; @@ -510,15 +487,12 @@ int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi) ctxt->info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING << ICE_AQ_VSI_OUTER_VLAN_EMODE_S; - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN stripping failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - err = -EIO; - } else { + err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (err) + dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN stripping failed, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + else vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags; - } kfree(ctxt); return err; @@ -547,9 +521,8 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; u8 tag_type; - int err = 0; + int err; if (vsi->info.port_based_outer_vlan) return 0; @@ -576,15 +549,12 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid) ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) & ICE_AQ_VSI_OUTER_TAG_TYPE_M); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN insertion failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - err = -EIO; - } else { + err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (err) + dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN insertion failed, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + else vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags; - } kfree(ctxt); return err; @@ -609,8 +579,7 @@ int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int err = 0; + int err; if (vsi->info.port_based_outer_vlan) return 0; @@ -631,15 +600,12 @@ int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi) ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) & ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M); - status = ice_update_vsi(hw, 
vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN insertion failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - err = -EIO; - } else { + err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (err) + dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN insertion failed, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + else vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags; - } kfree(ctxt); return err; @@ -671,9 +637,8 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; u8 tag_type; - int err = 0; + int err; if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type)) return -EINVAL; @@ -701,12 +666,10 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid) cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID | ICE_AQ_VSI_PROP_SW_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for setting outer port based VLAN failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - err = -EIO; + err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (err) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for setting outer port based VLAN failed, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); } else { vsi->info.port_based_outer_vlan = ctxt->info.port_based_outer_vlan; vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags; diff --git a/drivers/thirdparty/ice/ice_vsi_vlan_lib.h b/drivers/thirdparty/ice/ice_vsi_vlan_lib.h index 60925e920fac..c5aa5d6e3567 100644 --- a/drivers/thirdparty/ice/ice_vsi_vlan_lib.h +++ b/drivers/thirdparty/ice/ice_vsi_vlan_lib.h @@ -9,9 +9,6 @@ struct ice_vsi; -#define ICE_VLAN(tpid, vid, prio, fwd_action) \ - ((struct ice_vlan){ tpid, vid, prio, fwd_action }) - int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); int ice_vsi_del_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); diff --git a/drivers/thirdparty/ice/ice_vsi_vlan_ops.c b/drivers/thirdparty/ice/ice_vsi_vlan_ops.c index 466c691d6848..bbfa64e35c08 100644 --- a/drivers/thirdparty/ice/ice_vsi_vlan_ops.c +++ b/drivers/thirdparty/ice/ice_vsi_vlan_ops.c @@ -76,6 +76,7 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi) case ICE_VSI_SWITCHDEV_CTRL: ice_pf_vsi_init_vlan_ops(vsi); break; + case ICE_VSI_ADI: case ICE_VSI_VF: ice_vf_vsi_init_vlan_ops(vsi); break; diff --git a/drivers/thirdparty/ice/ice_xlt_kb.c b/drivers/thirdparty/ice/ice_xlt_kb.c new file mode 100644 index 000000000000..d1ea3e6eb16b --- /dev/null +++ b/drivers/thirdparty/ice/ice_xlt_kb.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#include "ice_common.h" + +#define ICE_XLT_KB_TBL_OFF 12 +#define ICE_XLT_KB_TBL_ENTRY_SIZE 24 + +static void _xlt_kb_entry_dump(struct ice_hw *hw, + struct ice_xlt_kb_entry *entry, int idx) +{ + int i; + + dev_info(ice_hw_to_dev(hw), "key builder entry %d\n", idx); + dev_info(ice_hw_to_dev(hw), "\txlt1_ad_sel = %d\n", + entry->xlt1_ad_sel); + dev_info(ice_hw_to_dev(hw), "\txlt2_ad_sel = %d\n", + entry->xlt2_ad_sel); + + for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++) + dev_info(ice_hw_to_dev(hw), "\tflg%d_sel = %d\n", i, + entry->flg0_14_sel[i]); + + dev_info(ice_hw_to_dev(hw), "\txlt1_md_sel = %d\n", + entry->xlt1_md_sel); + dev_info(ice_hw_to_dev(hw), "\txlt2_md_sel = %d\n", + entry->xlt2_md_sel); +} + +/** + * ice_xlt_kb_dump - dump a xlt key build info + * @hw: pointer to the hardware structure + * @kb: key build to dump + */ +void ice_xlt_kb_dump(struct ice_hw *hw, struct ice_xlt_kb *kb) +{ + int i; + + dev_info(ice_hw_to_dev(hw), "xlt1_pm = %d\n", kb->xlt1_pm); + dev_info(ice_hw_to_dev(hw), "xlt2_pm = %d\n", kb->xlt2_pm); + dev_info(ice_hw_to_dev(hw), "prof_id_pm = %d\n", kb->prof_id_pm); + dev_info(ice_hw_to_dev(hw), "flag15 low = 0x%08x\n", (u32)kb->flag15); + dev_info(ice_hw_to_dev(hw), "flag15 high = 0x%08x\n", + (u32)(kb->flag15 >> 32)); + + for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++) + _xlt_kb_entry_dump(hw, &kb->entries[i], i); +} + +/** The function parses a 192 bits XLT Key Build entry with below format: + * BIT 0-31: reserved + * BIT 32-34: XLT1 AdSel (entry->xlt1_ad_sel) + * BIT 35-37: XLT2 AdSel (entry->xlt2_ad_sel) + * BIT 38-46: Flag 0 Select (entry->flg0_14_sel[0]) + * BIT 47-55: Flag 1 Select (entry->flg0_14_sel[1]) + * BIT 56-64: Flag 2 Select (entry->flg0_14_sel[2]) + * BIT 65-73: Flag 3 Select (entry->flg0_14_sel[3]) + * BIT 74-82: Flag 4 Select (entry->flg0_14_sel[4]) + * BIT 83-91: Flag 5 Select (entry->flg0_14_sel[5]) + * BIT 92-100: Flag 6 Select (entry->flg0_14_sel[6]) + * BIT 101-109:Flag 7 Select (entry->flg0_14_sel[7]) + * BIT 110-118:Flag 8 Select (entry->flg0_14_sel[8]) + * BIT 119-127:Flag 9 Select (entry->flg0_14_sel[9]) + * BIT 128-136:Flag 10 Select (entry->flg0_14_sel[10]) + * BIT 137-145:Flag 11 Select (entry->flg0_14_sel[11]) + * BIT 146-154:Flag 12 Select (entry->flg0_14_sel[12]) + * BIT 155-163:Flag 13 Select (entry->flg0_14_sel[13]) + * BIT 164-172:Flag 14 Select (entry->flg0_14_sel[14]) + * BIT 173-181:reserved + * BIT 182-186:XLT1 MdSel (entry->xlt1_md_sel) + * BIT 187-191:XLT2 MdSel (entry->xlt2_md_sel) + */ +static void _kb_entry_init(struct ice_xlt_kb_entry *entry, u8 *data) +{ + u64 d64 = *(u64 *)&data[4]; + + entry->xlt1_ad_sel = (u8)(d64 & 0x7); + entry->xlt2_ad_sel = (u8)((d64 >> 3) & 0x7); + entry->flg0_14_sel[0] = (u16)((d64 >> 6) & 0x1ff); + entry->flg0_14_sel[1] = (u16)((d64 >> 15) & 0x1ff); + entry->flg0_14_sel[2] = (u16)((d64 >> 24) & 0x1ff); + entry->flg0_14_sel[3] = (u16)((d64 >> 33) & 0x1ff); + entry->flg0_14_sel[4] = (u16)((d64 >> 42) & 0x1ff); + entry->flg0_14_sel[5] = (u16)((d64 >> 51) & 0x1ff); + + d64 = (*(u64 *)&data[11] >> 4); + entry->flg0_14_sel[6] = (u16)(d64 & 0x1ff); + entry->flg0_14_sel[7] = (u16)((d64 >> 9) & 0x1ff); + entry->flg0_14_sel[8] = (u16)((d64 >> 18) & 0x1ff); + entry->flg0_14_sel[9] = (u16)((d64 >> 27) & 0x1ff); + entry->flg0_14_sel[10] = (u16)((d64 >> 36) & 0x1ff); + entry->flg0_14_sel[11] = (u16)((d64 >> 45) & 0x1ff); + + d64 = (*(u64 *)&data[18] >> 2); + entry->flg0_14_sel[12] = (u16)(d64 & 0x1ff); + entry->flg0_14_sel[13] = (u16)((d64 >> 9) & 0x1ff); + entry->flg0_14_sel[14] = (u16)((d64 >> 18) & 
0x1ff); + + entry->xlt1_md_sel = (u8)((d64 >> 36) & 0x1f); + entry->xlt2_md_sel = (u8)((d64 >> 41) & 0x1f); +} + +/** The function parses a 204 bytes XLT Key Build Table with below format: + * byte 0: XLT1 Partition Mode (kb->xlt1_pm) + * byte 1: XLT2 Partition Mode (kb->xlt2_pm) + * byte 2: Profile ID Partition Mode (kb->prof_id_pm) + * byte 3: reserved + * byte 4-11: Flag15 Mask (kb->flag15) + * byte 12-203:8 Key Build entries (kb->entries) + */ +static void _parse_kb_data(struct ice_hw *hw, struct ice_xlt_kb *kb, void *data) +{ + u8 *buf = (u8 *)data; + int i; + + kb->xlt1_pm = buf[0]; + kb->xlt2_pm = buf[1]; + kb->prof_id_pm = buf[2]; + + kb->flag15 = *(u64 *)&buf[4]; + for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++) + _kb_entry_init(&kb->entries[i], + &buf[ICE_XLT_KB_TBL_OFF + + i * ICE_XLT_KB_TBL_ENTRY_SIZE]); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_xlt_kb_dump(hw, kb); +} + +static struct ice_xlt_kb *_xlt_kb_get(struct ice_hw *hw, u32 sect_type) +{ + struct ice_seg *seg = hw->seg; + struct ice_pkg_enum state; + struct ice_xlt_kb *kb; + void *data; + + if (!seg) + return NULL; + + kb = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*kb), GFP_KERNEL); + if (!kb) { + ice_debug(hw, ICE_DBG_PARSER, "failed to allocate memory for xlt key builder type %d.\n", + sect_type); + return NULL; + } + + memset(&state, 0, sizeof(state)); + data = ice_pkg_enum_section(seg, &state, sect_type); + if (!data) { + ice_debug(hw, ICE_DBG_PARSER, "failed to find section type %d.\n", + sect_type); + return NULL; + } + + _parse_kb_data(hw, kb, data); + + return kb; +} + +/** + * ice_xlt_kb_get_sw - create switch xlt key build + * @hw: pointer to the hardware structure + */ +struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw) +{ + return _xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_SW); +} + +/** + * ice_xlt_kb_get_acl - create acl xlt key build + * @hw: pointer to the hardware structure + */ +struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw) +{ + return _xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_ACL); +} + +/** + * ice_xlt_kb_get_fd - create fdir xlt key build + * @hw: pointer to the hardware structure + */ +struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw) +{ + return _xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_FD); +} + +/** + * ice_xlt_kb_get_rss - create rss xlt key build + * @hw: pointer to the hardware structure + */ +struct ice_xlt_kb *ice_xlt_kb_get_rss(struct ice_hw *hw) +{ + return _xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_RSS); +} + +/** + * ice_xlt_kb_flag_get - aggregate 64 bits packet flag into 16 bits xlt flag + * @kb: xlt key build + * @pkt_flag: 64 bits packet flag + */ +u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag) +{ + struct ice_xlt_kb_entry *entry = &kb->entries[0]; + u16 flg = 0; + int i; + + /* check flag 15 */ + if (kb->flag15 & pkt_flag) + flg = (u16)(1u << 15); + + /* check flag 0 - 14 */ + for (i = 0; i < 15; i++) { + /* only check first entry */ + u16 idx = (u16)(entry->flg0_14_sel[i] & 0x3f); + + if (pkt_flag & (1ul << idx)) + flg |= (u16)(1u << i); + } + + return flg; +} diff --git a/drivers/thirdparty/ice/ice_xlt_kb.h b/drivers/thirdparty/ice/ice_xlt_kb.h new file mode 100644 index 000000000000..146f70044b65 --- /dev/null +++ b/drivers/thirdparty/ice/ice_xlt_kb.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
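Editor's aside, not part of the patch: the new ice_xlt_kb.c accessors above are easiest to read next to a small caller. The sketch below only illustrates how ice_xlt_kb_get_sw() and ice_xlt_kb_flag_get() compose, assuming hw->seg already points at a loaded DDP segment; the function name example_sw_xlt_flag() is hypothetical and does not exist in the driver.

/* Illustrative only. Builds the switch key builder from the DDP package and
 * folds a 64-bit packet flag word into the 16-bit XLT flag it selects.
 */
#include "ice_common.h"
#include "ice_xlt_kb.h"

static u16 example_sw_xlt_flag(struct ice_hw *hw, u64 pkt_flag)
{
	struct ice_xlt_kb *kb;

	/* parses the ICE_SID_XLT_KEY_BUILDER_SW section of the loaded DDP;
	 * the allocation is devm-managed, so it is released with the device
	 */
	kb = ice_xlt_kb_get_sw(hw);
	if (!kb)
		return 0;

	/* bit 15 comes from kb->flag15; bits 0-14 from entry 0 selectors */
	return ice_xlt_kb_flag_get(kb, pkt_flag);
}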
*/ + +#ifndef _ICE_XLT_KB_H_ +#define _ICE_XLT_KB_H_ + +#define ICE_XLT_KB_TBL_CNT 8 +#define ICE_XLT_KB_FLAG0_14_CNT 15 + +struct ice_xlt_kb_entry { + u8 xlt1_ad_sel; + u8 xlt2_ad_sel; + u16 flg0_14_sel[ICE_XLT_KB_FLAG0_14_CNT]; + u8 xlt1_md_sel; + u8 xlt2_md_sel; +}; + +struct ice_xlt_kb { + u8 xlt1_pm; + u8 xlt2_pm; + u8 prof_id_pm; + u64 flag15; + + struct ice_xlt_kb_entry entries[ICE_XLT_KB_TBL_CNT]; +}; + +void ice_xlt_kb_dump(struct ice_hw *hw, struct ice_xlt_kb *kb); +struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw); +struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw); +struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw); +struct ice_xlt_kb *ice_xlt_kb_get_rss(struct ice_hw *hw); +u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag); +#endif /* _ICE_XLT_KB_H */ diff --git a/drivers/thirdparty/ice/ice_xsk.c b/drivers/thirdparty/ice/ice_xsk.c index 43012bb9b115..30507f55db18 100644 --- a/drivers/thirdparty/ice/ice_xsk.c +++ b/drivers/thirdparty/ice/ice_xsk.c @@ -12,6 +12,7 @@ #include "ice_xsk.h" #include "ice_txrx.h" #include "ice_txrx_lib.h" +#include "ice_irq.h" #ifdef HAVE_AF_XDP_ZC_SUPPORT /** @@ -40,6 +41,7 @@ static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) ice_clean_tx_ring(vsi->tx_rings[q_idx]); if (ice_is_xdp_ena_vsi(vsi)) ice_clean_tx_ring(vsi->xdp_rings[q_idx]); + ice_clean_rx_ring(vsi->rx_rings[q_idx]); } @@ -92,7 +94,7 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring, wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0); ice_flush(hw); - synchronize_irq(pf->msix_entries[v_idx + base].vector); + synchronize_irq(ice_get_irq_num(pf, v_idx + base)); } } @@ -218,7 +220,6 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) return -ENOMEM; qg_buf->num_txqs = 1; - tx_ring = vsi->tx_rings[q_idx]; rx_ring = vsi->rx_rings[q_idx]; q_vector = rx_ring->q_vector; @@ -236,7 +237,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) if (err) goto free_buf; ice_set_ring_xdp(xdp_ring); - xdp_ring->xsk_pool = ice_xsk_umem(xdp_ring); + xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring); } err = ice_vsi_cfg_rxq(rx_ring); @@ -367,36 +368,38 @@ static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem) #endif /** - * ice_xsk_umem_disable - disable a UMEM region + * ice_xsk_pool_disable - disable a buffer pool region * @vsi: Current VSI * @qid: queue ID * * Returns 0 on success, negative on failure */ -static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid) +static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid) { #ifdef HAVE_AF_XDP_NETDEV_UMEM #ifdef HAVE_NETDEV_BPF_XSK_POOL - struct xsk_buff_pool *umem = xsk_get_pool_from_qid(vsi->netdev, qid); + struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid); #else - struct xdp_umem *umem = xsk_get_pool_from_qid(vsi->netdev, qid); + struct xdp_umem *pool = xsk_get_pool_from_qid(vsi->netdev, qid); #endif #else - struct xdp_umem *umem; + struct xdp_umem *pool; if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems) return -EINVAL; - umem = vsi->xsk_umems[qid]; + pool = vsi->xsk_umems[qid]; #endif - if (!umem) + if (!pool) return -EINVAL; + clear_bit(qid, vsi->af_xdp_zc_qps); + #ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL - ice_xsk_umem_dma_unmap(vsi, umem); + ice_xsk_umem_dma_unmap(vsi, pool); #else - xsk_pool_dma_unmap(umem, ICE_RX_DMA_ATTR); + xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR); #endif #ifndef HAVE_AF_XDP_NETDEV_UMEM @@ -407,26 +410,24 @@ static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid) } /** - * ice_xsk_umem_enable - enable a UMEM region + * 
ice_xsk_pool_enable - enable a buffer pool region * @vsi: Current VSI - * @umem: pointer to a requested UMEM region + * @pool: pointer to a requested buffer pool region * @qid: queue ID * * Returns 0 on success, negative on failure */ static int #ifdef HAVE_NETDEV_BPF_XSK_POOL -ice_xsk_umem_enable(struct ice_vsi *vsi, struct xsk_buff_pool *umem, u16 qid) +ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) #else -ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid) +ice_xsk_pool_enable(struct ice_vsi *vsi, struct xdp_umem *pool, u16 qid) #endif /* HAVE_NETDEV_BPF_XSK_POOL */ { #ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL struct xdp_umem_fq_reuse *reuseq; #endif -#ifndef HAVE_AF_XDP_NETDEV_UMEM int err; -#endif if (vsi->type != ICE_VSI_PF) return -EINVAL; @@ -444,7 +445,7 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid) if (vsi->xsk_umems && vsi->xsk_umems[qid]) return -EBUSY; - vsi->xsk_umems[qid] = umem; + vsi->xsk_umems[qid] = pool; vsi->num_xsk_umems_used++; #else if (qid >= vsi->netdev->real_num_rx_queues || @@ -457,31 +458,44 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid) if (!reuseq) return -ENOMEM; - xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); + xsk_reuseq_free(xsk_reuseq_swap(pool, reuseq)); - return ice_xsk_umem_dma_map(vsi, umem); + err = ice_xsk_umem_dma_map(vsi, pool); #else - return xsk_pool_dma_map(umem, ice_pf_to_dev(vsi->back), + err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back), ICE_RX_DMA_ATTR); #endif + + if (err) + return err; + + set_bit(qid, vsi->af_xdp_zc_qps); + + return 0; } /** - * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state + * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state * @vsi: Current VSI - * @umem: UMEM to enable/associate to a ring, NULL to disable + * @pool: buffer pool to enable/associate to a ring, NULL to disable * @qid: queue ID * * Returns 0 on success, negative on failure */ #ifdef HAVE_NETDEV_BPF_XSK_POOL -int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xsk_buff_pool *umem, u16 qid) +int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) #else -int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid) +int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *pool, u16 qid) #endif { - bool if_running, umem_present = !!umem; - int ret = 0, umem_failure = 0; + bool if_running, pool_present = !!pool; + int ret = 0, pool_failure = 0; + + if (qid >= vsi->num_rxq || qid >= vsi->num_txq) { + netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n"); + pool_failure = -EINVAL; + goto failure; + } if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi); @@ -489,26 +503,27 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid) ret = ice_qp_dis(vsi, qid); if (ret) { netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret); - goto xsk_umem_if_up; + goto xsk_pool_if_up; } } - umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) : - ice_xsk_umem_disable(vsi, qid); + pool_failure = pool_present ? 
ice_xsk_pool_enable(vsi, pool, qid) : + ice_xsk_pool_disable(vsi, qid); -xsk_umem_if_up: +xsk_pool_if_up: if (if_running) { ret = ice_qp_ena(vsi, qid); - if (!ret && umem_present) + if (!ret && pool_present) napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi); else if (ret) netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret); } - if (umem_failure) { - netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n", - umem_present ? "en" : "dis", umem_failure); - return umem_failure; +failure: + if (pool_failure) { + netdev_err(vsi->netdev, "Could not %sable pool, error = %d\n", + pool_present ? "en" : "dis", pool_failure); + return pool_failure; } return ret; @@ -679,11 +694,11 @@ ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) } #endif /* !HAVE_MEM_TYPE_XSK_BUFF_POOL */ -/* +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +/** * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers * @rx_ring: Rx ring * @count: The number of buffers to allocate - * @alloc: the function pointer to call for allocation * * This function allocates a number of Rx buffers from the fill ring * or the internal recycle mechanism and places them on the Rx ring. @@ -692,12 +707,11 @@ ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) * NOTE: this function header description doesn't do kdoc style * because of the function pointer creating problems. */ -#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count) +#else static bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count, bool (*alloc)(struct ice_ring *, struct ice_rx_buf *)) -#else -bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count) #endif { union ice_32b_rx_flex_desc *rx_desc; @@ -934,11 +948,11 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) result = !err ? 
ICE_XDP_REDIR : ICE_XDP_CONSUMED; break; default: - bpf_warn_invalid_xdp_action(act); - /* fallthrough -- not supported action */ + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); + fallthrough; /* not supported action */ case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - /* fallthrough -- handle aborts by dropping frame */ + fallthrough; /* handle aborts by dropping frame */ case XDP_DROP: result = ICE_XDP_CONSUMED; break; @@ -1302,12 +1316,14 @@ int ice_xsk_async_xmit(struct net_device *netdev, u32 queue_id) */ q_vector = ring->q_vector; if (!napi_if_scheduled_mark_missed(&q_vector->napi)) { +#if IS_ENABLED(CONFIG_NET_RX_BUSY_POLL) if (ice_ring_ch_enabled(vsi->rx_rings[queue_id]) && !ice_vsi_pkt_inspect_opt_ena(vsi)) #define ICE_BUSY_POLL_BUDGET 8 napi_busy_loop(q_vector->napi.napi_id, NULL, NULL, false, ICE_BUSY_POLL_BUDGET); else +#endif ice_trigger_sw_intr(&vsi->back->hw, q_vector); } diff --git a/drivers/thirdparty/ice/ice_xsk.h b/drivers/thirdparty/ice/ice_xsk.h index 63599776bac1..d1aa048571ce 100644 --- a/drivers/thirdparty/ice/ice_xsk.h +++ b/drivers/thirdparty/ice/ice_xsk.h @@ -4,7 +4,6 @@ #ifndef _ICE_XSK_H_ #define _ICE_XSK_H_ #include "ice_txrx.h" -#include "ice.h" #ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL #include #endif @@ -14,13 +13,15 @@ struct ice_vsi; #ifdef HAVE_AF_XDP_ZC_SUPPORT #ifdef CONFIG_XDP_SOCKETS #ifdef HAVE_NETDEV_BPF_XSK_POOL -int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xsk_buff_pool *umem, +int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid); #else int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid); #endif +#ifndef NO_XDP_QUERY_XSK_UMEM int ice_xsk_umem_query(struct ice_vsi *vsi, struct xdp_umem **umem, u16 qid); +#endif #ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle); #endif @@ -41,10 +42,11 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring); void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring); #else static inline int -ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi, #ifdef HAVE_NETDEV_BPF_XSK_POOL +ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi, struct xsk_buff_pool __always_unused *pool, #else +ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi, struct xdp_umem __always_unused *umem, #endif u16 __always_unused qid) @@ -52,6 +54,7 @@ ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi, return -EOPNOTSUPP; } +#ifndef NO_XDP_QUERY_XSK_UMEM static inline int ice_xsk_umem_query(struct ice_vsi __always_unused *vsi, struct xdp_umem __always_unused **umem, @@ -59,6 +62,7 @@ ice_xsk_umem_query(struct ice_vsi __always_unused *vsi, { return -EOPNOTSUPP; } +#endif #ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL static inline void diff --git a/drivers/thirdparty/ice/idc_generic.h b/drivers/thirdparty/ice/idc_generic.h new file mode 100644 index 000000000000..8da50f991552 --- /dev/null +++ b/drivers/thirdparty/ice/idc_generic.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
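Editor's aside, not part of the patch: ice_xsk_pool_setup() above is the AF_XDP zero-copy entry point that the netdev XDP hook ends up calling; the real dispatch lives in the ndo_bpf handler in ice_main.c, which is outside this hunk. The fragment below is a minimal sketch of such a dispatch, assuming a kernel where struct netdev_bpf carries the XDP_SETUP_XSK_POOL command (HAVE_NETDEV_BPF_XSK_POOL builds); example_xdp() is a hypothetical name.

/* Illustrative only: routing XDP_SETUP_XSK_POOL to ice_xsk_pool_setup().
 * A NULL bpf->xsk.pool disables zero-copy on the given queue.
 */
#include <linux/netdevice.h>
#include "ice.h"
#include "ice_xsk.h"

static int example_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	switch (bpf->command) {
	case XDP_SETUP_XSK_POOL:
		return ice_xsk_pool_setup(vsi, bpf->xsk.pool,
					  bpf->xsk.queue_id);
	default:
		return -EINVAL;
	}
}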
*/ + +#ifndef _IDC_GENERIC_H_ +#define _IDC_GENERIC_H_ + +/* Terminology + * mfd: multi function device/driver that maintains and shares the data for the + * mfd cell + * mfd cell: Device/driver that depends on mfd for its hw data + */ + +#include + +/* Unique names used to match and load mfd cells */ +#define IDC_MFD_CELL_NAME_RDMA "rdma" + +/* Unique ids used to match and load mfd cells */ +#define IDC_MFD_CELL_ID_RDMA_PF 0x1 +#define IDC_MFD_CELL_ID_RDMA_VF 0x2 +#define IDC_MFD_CELL_ID_MAX 0x3 + +/* TODO: Revisit and move to virtchnl method of versioning */ +/* Version info used to check for compatibility between mfd and mfd cell */ +#define IDC_MAJOR_VER 1 +#define IDC_MINOR_VER 1 + +#define IDC_QOS_MAX_USER_PRIORITY 8 +#define IDC_QOS_MAX_TC 8 + +/* Forward declarations */ +struct idc_mfd_data; + +/* Reset types */ +enum idc_reset_type { + IDC_FUN_RESET = 0, +}; + +enum idc_close_reason { + IDC_INTERFACE_DOWN, + IDC_HW_RESET_PENDING, +}; + +enum idc_event { + IDC_BEFORE_MTU_CHANGE, + IDC_AFTER_MTU_CHANGE, + IDC_BEFORE_TC_CHANGE, + IDC_AFTER_TC_CHANGE, + IDC_BEFORE_INTR_CHANGE, + IDC_AFTER_INTR_CHANGE, +}; + +/* Version info used to check for compatibility between mfd and mfd cells */ +struct idc_ver_info { + u16 major; + u16 minor; +}; + +/* QoS info */ +struct idc_qos_params { + u8 rel_bw[IDC_QOS_MAX_TC]; + u8 up2tc[IDC_QOS_MAX_USER_PRIORITY]; + u32 num_apps; + u8 num_tc; + u8 prio_type[IDC_QOS_MAX_TC]; + u64 tc_ctx[IDC_QOS_MAX_TC]; + u8 vport_relative_bw; + u8 vport_priority_type; +}; + +/* RDMA queue vector map info */ +struct idc_qv_info { + u32 v_idx; + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct idc_qvlist_info { + u32 num_vectors; + struct idc_qv_info qv_info[1]; +}; + +/* Following APIs are implemented by mfd and invoked by mfd cells */ +struct idc_mfd_ops { + /* Called by mfd cell to indicate probe finished */ + int (*probe_finished)(struct idc_mfd_data *mfd_data); + /* Called by mfd cell to indicate remove started */ + void (*remove_started)(struct idc_mfd_data *mfd_data); + /* Called by mfd cell to indicate remove finished */ + void (*remove_finished)(struct idc_mfd_data *mfd_data); + /* Used by mfd cell to request a reset on mfd */ + int (*request_reset)(struct idc_mfd_data *mfd_data, + enum idc_reset_type reset_type); + /* Used by mfd cell to send mailbox messages */ + int (*vc_send)(struct idc_mfd_data *mfd_data, u32 f_id, u8 *msg, + u16 len); + /* used by mfd cell to send map unmap vector mailbox message. This + * message uses a different vc opcode and so different callback other + * than vc_send + */ + int (*vc_queue_vec_map_unmap)(struct idc_mfd_data *mfd_data, + struct idc_qvlist_info *qvl_info, + bool map); +}; + +/* Following APIs are implemented by mfd cells and invoked by mfd */ +struct idc_mfd_cell_ops { + /* Why we have 'open' and when it is expected to be called: + * 1. symmetric set of API w.r.t close + * 2. To be invoked form driver initialization path, should be probe + * 3. To be invoked upon RESET complete + */ + int (*open)(struct idc_mfd_data *mfd_data); + + /* close function is to be called when the mfd cell needs to be + * quiesced. This can be for a variety of reasons (enumerated in the + * idc_close_reason enum struct). A call to close will only be + * followed by a call to either remove or open. No IDC calls from the + * mfd cell should be accepted until it is re-opened. + * + * The *reason* parameter is the reason for the call to close. This + * can be for any reason enumerated in the idc_close_reason struct. 
+ * It's primary reason is for the mfd drivers bookkeeping and in + * case the mfd cell wants to perform any different tasks + * dictated by the reason. + */ + int (*close)(struct idc_mfd_data *mfd_data, + enum idc_close_reason reason); + /* Used by mfd to pass received mailbox messages to mfd cell */ + int (*vc_receive)(struct idc_mfd_data *mfd_data, u32 f_id, u8 *msg, + u16 len); + /* used by mfd to inform various software events */ + int (*event)(struct idc_mfd_data *mfd_data, enum idc_event event); +}; + +/* Structure representing idc multi function device data Initial steps for + * sharing info is listed below + * 1.mfd registers shared data with OS + * 2.mfd cell registers platform_drv with OS + * 3.mfd cell probe is called by OS + * Match of id_entry of mfd and id_table of mfd cell determines + * which probe has to be called + * 4 probe_finished func of mfd will be called by mfd cell probe + * 5.open function of mfd cell is called by mfd + *.6 close function of mfd cell is called by mfd when mfd goes down + */ +struct idc_mfd_data { + /* Below fields are initialized by mfd. Done before calling + * mfd_add_devices OS API + */ + /* PCI device corresponding to main function Used by mfd cell + * for dma memory allocations and BAR4 access + */ + struct pci_dev *pdev; + /* Linear address corresponding to BAR0 of underlying + * pci_device. Used by mfd cell for register space access + */ + u8 __iomem *hw_addr; + + /* Vector info to be used by mfd cell */ + struct msix_entry *msix_entries; + /* Number of vectors reserved for the mfd cell */ + u16 msix_count; + /* Used by mfd cell for version checks */ + struct idc_ver_info mfd_ver; + /* mfd function type pf or vf */ + int func_type; + /* net device interface owned by mfd */ + struct net_device *netdev; + /* TC info */ + struct idc_qos_params qos_info; + /* Function pointers to be initialized by mfd and called by mfd cell + */ + struct idc_mfd_ops mfd_ops; + + /* Below fields are initialized by mfd cell. Done before calling + * probe_finished function of mfd + */ + /* used by mfd for version checks */ + struct idc_ver_info mfd_cell_ver; + /* Function pointers to be initialized by mfd cell and called by mfd + */ + struct idc_mfd_cell_ops mfd_cell_ops; +}; + +/* Structure representing the multi function device data to be shared */ +struct __idc_mfd_data { + struct idc_mfd_data *mfd_data; +}; +#endif /* _IDC_GENERIC_H_*/ diff --git a/drivers/thirdparty/ice/ieps_peer.h b/drivers/thirdparty/ice/ieps_peer.h new file mode 100644 index 000000000000..8567f0a4390a --- /dev/null +++ b/drivers/thirdparty/ice/ieps_peer.h @@ -0,0 +1,282 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +/* Intel(R) Ethernet Connection E800 Series Linux Driver IEPS extensions */ + +#ifndef _IEPS_PEER_H_ +#define _IEPS_PEER_H_ + +#include +#include +#include + +#define IEPS_VERSION_PEER_MAJOR 1 +#define IEPS_VERSION_PEER_MINOR 1 + +struct ieps_peer_api_version { + __u8 major; + __u8 minor; +}; + +struct ieps_peer_i2c { + __u8 bus; + __u16 dev_addr; + __u16 reg_addr; + bool en_10b_addr; + __u8 *data; + __u8 data_len; +}; + +enum ieps_peer_mdio_clause { + IEPS_PEER_MDIO_CLAUSE_22, + IEPS_PEER_MDIO_CLAUSE_45, + + /* Must be last */ + NUM_IEPS_PEER_MDIO_CLAUSE +}; + +struct ieps_peer_mdio { + enum ieps_peer_mdio_clause clause; + __u8 bus; + __u8 phy_addr; + __u8 dev_type; + __u16 reg_addr; + __u16 *data; + __u8 data_len; +}; + +struct ieps_peer_gpio { + __u8 pin_num; + bool is_input; + bool pin_val; +}; + +/* INTPHY */ +enum ieps_peer_phy_type { + IEPS_PEER_PHY_TYPE_100BASE_TX = 0, + IEPS_PEER_PHY_TYPE_100M_SGMII = 1, + IEPS_PEER_PHY_TYPE_1000BASE_T = 2, + IEPS_PEER_PHY_TYPE_1000BASE_SX = 3, + IEPS_PEER_PHY_TYPE_1000BASE_LX = 4, + IEPS_PEER_PHY_TYPE_1000BASE_KX = 5, + IEPS_PEER_PHY_TYPE_1G_SGMII = 6, + IEPS_PEER_PHY_TYPE_2500BASE_T = 7, + IEPS_PEER_PHY_TYPE_2500BASE_X = 8, + IEPS_PEER_PHY_TYPE_2500BASE_KX = 9, + IEPS_PEER_PHY_TYPE_5GBASE_T = 10, + IEPS_PEER_PHY_TYPE_5GBASE_KR = 11, + IEPS_PEER_PHY_TYPE_10GBASE_T = 12, + IEPS_PEER_PHY_TYPE_10G_SFI_DA = 13, + IEPS_PEER_PHY_TYPE_10GBASE_SR = 14, + IEPS_PEER_PHY_TYPE_10GBASE_LR = 15, + IEPS_PEER_PHY_TYPE_10GBASE_KR_CR1 = 16, + IEPS_PEER_PHY_TYPE_10G_SFI_AOC_ACC = 17, + IEPS_PEER_PHY_TYPE_10G_SFI_C2C = 18, + IEPS_PEER_PHY_TYPE_25GBASE_T = 19, + IEPS_PEER_PHY_TYPE_25GBASE_CR = 20, + IEPS_PEER_PHY_TYPE_25GBASE_CR_S = 21, + IEPS_PEER_PHY_TYPE_25GBASE_CR1 = 22, + IEPS_PEER_PHY_TYPE_25GBASE_SR = 23, + IEPS_PEER_PHY_TYPE_25GBASE_LR = 24, + IEPS_PEER_PHY_TYPE_25GBASE_KR = 25, + IEPS_PEER_PHY_TYPE_25GBASE_KR_S = 26, + IEPS_PEER_PHY_TYPE_25GBASE_KR1 = 27, + IEPS_PEER_PHY_TYPE_25G_AUI_AOC_ACC = 28, + IEPS_PEER_PHY_TYPE_25G_AUI_C2C = 29, + IEPS_PEER_PHY_TYPE_40GBASE_CR4 = 30, + IEPS_PEER_PHY_TYPE_40GBASE_SR4 = 31, + IEPS_PEER_PHY_TYPE_40GBASE_LR4 = 32, + IEPS_PEER_PHY_TYPE_40GBASE_KR4 = 33, + IEPS_PEER_PHY_TYPE_40G_XLAUI_AOC_ACC = 34, + IEPS_PEER_PHY_TYPE_40G_XLAUI = 35, + IEPS_PEER_PHY_TYPE_50GBASE_CR2 = 36, + IEPS_PEER_PHY_TYPE_50GBASE_SR2 = 37, + IEPS_PEER_PHY_TYPE_50GBASE_LR2 = 38, + IEPS_PEER_PHY_TYPE_50GBASE_KR2 = 39, + IEPS_PEER_PHY_TYPE_50G_LAUI2_AOC_ACC = 40, + IEPS_PEER_PHY_TYPE_50G_LAUI2 = 41, + IEPS_PEER_PHY_TYPE_50G_AUI2_AOC_ACC = 42, + IEPS_PEER_PHY_TYPE_50G_AUI2 = 43, + IEPS_PEER_PHY_TYPE_50GBASE_CP = 44, + IEPS_PEER_PHY_TYPE_50GBASE_SR = 45, + IEPS_PEER_PHY_TYPE_50GBASE_FR = 46, + IEPS_PEER_PHY_TYPE_50GBASE_LR = 47, + IEPS_PEER_PHY_TYPE_50GBASE_KR_PAM4 = 48, + IEPS_PEER_PHY_TYPE_50G_AUI1_AOC_ACC = 49, + IEPS_PEER_PHY_TYPE_50G_AUI1 = 50, + IEPS_PEER_PHY_TYPE_100GBASE_CR4 = 51, + IEPS_PEER_PHY_TYPE_100GBASE_SR4 = 52, + IEPS_PEER_PHY_TYPE_100GBASE_LR4 = 53, + IEPS_PEER_PHY_TYPE_100GBASE_KR4 = 54, + IEPS_PEER_PHY_TYPE_100G_CAUI4_AOC_ACC = 55, + IEPS_PEER_PHY_TYPE_100G_CAUI4 = 56, + IEPS_PEER_PHY_TYPE_100G_AUI4_AOC_ACC = 57, + IEPS_PEER_PHY_TYPE_100G_AUI4 = 58, + IEPS_PEER_PHY_TYPE_100GBASE_CR_PAM4 = 59, + IEPS_PEER_PHY_TYPE_100GBASE_KR_PAM4 = 60, + IEPS_PEER_PHY_TYPE_100GBASE_CP2 = 61, + IEPS_PEER_PHY_TYPE_100GBASE_SR2 = 62, + IEPS_PEER_PHY_TYPE_100GBASE_DR = 63, + IEPS_PEER_PHY_TYPE_100GBASE_KR2_PAM4 = 64, + IEPS_PEER_PHY_TYPE_100G_CAUI2_AOC_ACC = 65, + IEPS_PEER_PHY_TYPE_100G_CAUI2 = 66, + IEPS_PEER_PHY_TYPE_100G_AUI2_AOC_ACC = 67, + 
IEPS_PEER_PHY_TYPE_100G_AUI2 = 68, + + NUM_IEPS_PEER_PHY_TYPE, +}; + +struct ieps_peer_phy_caps { + __u64 phy_type_low; + __u64 phy_type_high; + + bool en_tx_pause; + bool en_rx_pause; + bool low_power_mode; + bool en_link; + bool an_mode; + bool en_lesm; + bool en_auto_fec; + + __u32 an_options_bm; + __u32 fec_options_bm; + __u8 phy_fw_ver[8]; +}; + +enum ieps_peer_phy_fec_type { + IEPS_PEER_FEC_10G_40G_KR_EN = BIT(0), + IEPS_PEER_FEC_10G_40G_KR_REQ = BIT(1), + IEPS_PEER_FEC_25G_RS_528_REQ = BIT(2), + IEPS_PEER_FEC_25G_KR_REQ = BIT(3), + IEPS_PEER_FEC_25G_RS_544_REQ = BIT(4), + IEPS_PEER_FEC_25G_RS_CL91_EN = BIT(6), + IEPS_PEER_FEC_25G_KR_CL74_EN = BIT(7), + + MASK_IEPS_PEER_FEC = 0xDF, +}; + +enum ieps_peer_phy_an_clause { + IEPS_PEER_AN_D3COLD = BIT(0), + IEPS_PEER_AN_CL28 = BIT(1), + IEPS_PEER_AN_CL73 = BIT(2), + IEPS_PEER_AN_CL37 = BIT(3), + + MASK_IEPS_PEER_AN = 0xF, +}; + +enum ieps_peer_port_mode { + IEPS_PEER_PORT_MODE_DOWN, + IEPS_PEER_PORT_MODE_UP, + + NUM_IEPS_PEER_PORT_MODE, +}; + +struct ieps_peer_phy_link_status { + __u8 link_cfg_err; + bool link_up; + bool link_fault_tx; + bool link_fault_rx; + bool link_fault_remote; + bool link_up_ext_port; + bool media_available; /* Not applicable for BASE_T or BACKPLANE */ + bool los; + bool an_complete; + bool an_capable; /* Valid if AN enabled */ + bool fec_enabled; + bool in_low_power_state; + bool is_tx_pause_en; + bool is_rx_pause_en; + bool exessive_link_err; + bool tx_suspended; + bool lb_mac_on; + bool lb_phy_local_on; + bool lb_phy_remote_on; + bool lse_on; + enum ieps_peer_phy_fec_type fec_type; + enum ieps_peer_phy_type phy_type; + __le64 phy_type_low; + __le64 phy_type_high; +}; + +enum ieps_peer_port_attr { + IEPS_PEER_PA_PHY_TYPE, + IEPS_PEER_PA_PHY_AN, + IEPS_PEER_PA_PHY_FEC, + IEPS_PEER_PA_PHY_LOOPBACK_LOCAL, + IEPS_PEER_PA_PHY_LOOPBACK_REMOTE, + + NUM_IEPS_PEER_PA_PHY, +}; + +union ieps_peer_port_attr_cfg { + enum ieps_peer_phy_type phy_type; + bool an_cl37_enable; + __u32 fec_options_bm; + bool en_phy_local_lb; + bool en_phy_remote_lb; +}; + +struct ieps_peer_port_attr_data { + enum ieps_peer_port_attr attr; + union ieps_peer_port_attr_cfg cfg; +}; + +struct ieps_peer_intphy_reg_rw { + __u64 reg; + __u32 data; + bool is_write; +}; + +enum ieps_peer_cmd { + IEPS_PEER_CMD_VERSION_CHECK, + IEPS_PEER_CMD_I2C_READ, + IEPS_PEER_CMD_I2C_WRITE, + IEPS_PEER_CMD_MDIO_READ, + IEPS_PEER_CMD_MDIO_WRITE, + IEPS_PEER_CMD_GPIO_GET, + IEPS_PEER_CMD_GPIO_SET, + + /* INTPHY */ + IEPS_PEER_CMD_GET_NVM_PHY_CAPS, + IEPS_PEER_CMD_GET_LINK_STATUS, + IEPS_PEER_CMD_PORT_SET_MODE, + IEPS_PEER_CMD_PORT_GET_MODE, + IEPS_PEER_CMD_PORT_SET_ATTR, + IEPS_PEER_CMD_PORT_GET_ATTR, + + /* DFX */ + IEPS_PEER_CMD_INTPHY_REG_RW, + + IEPS_PEER_CMD_SET_LM_CONFIG, + + /* Must be last */ + NUM_IEPS_PEER_CMD +}; + +enum ieps_peer_status { + IEPS_PEER_SUCCESS, + IEPS_PEER_FW_ERROR, + IEPS_PEER_NO_MEMORY, + IEPS_PEER_INVALID_CMD, + IEPS_PEER_INVALID_ARG, + IEPS_PEER_INVALID_PEER_DEV, + IEPS_PEER_VER_INCOMPATIBLE, + + IEPS_PEER_INVALID_PORT_MODE, + IEPS_PEER_INVALID_PORT_ATTR, + IEPS_PEER_PORT_INV_PHY_TYPE, + IEPS_PEER_INVALID_AN_OPT, + IEPS_PEER_INVALID_FEC_OPT, + IEPS_PEER_MULTIPLE_PHY_TYPE, + IEPS_PEER_PHY_TYPE_NOTSUP, + IEPS_PEER_FEC_OPT_NOTSUP, +}; + +struct ieps_peer_arg { + enum ieps_peer_cmd cmd; + unsigned int port; + void *data; + enum ieps_peer_status status; +}; + +#endif /* _IEPS_PEER_H_ */ diff --git a/drivers/thirdparty/ice/iidc.h b/drivers/thirdparty/ice/iidc.h new file mode 100644 index 000000000000..60aca47a87e9 --- /dev/null +++ 
b/drivers/thirdparty/ice/iidc.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _IIDC_H_ +#define _IIDC_H_ + +#include +#include +#include +#include +#include +#ifdef USE_INTEL_AUX_BUS +#include "linux/auxiliary_bus.h" +#else +#include +#endif /* USE_INTEL_AUX_BUS */ + +/* This major and minor version represent IDC API version information. + * + * The concept of passing an API version should be incorporated into the + * auxiliary drivers' probe handlers to check if they can communicate with the + * core PCI driver. During auxiliary driver probe, auxiliary driver should + * check major and minor version information (via iidc_core_dev_info:ver). If + * the version check fails, the auxiliary driver should fail the probe and log + * an appropriate message. + */ +#define IIDC_MAJOR_VER 10 +#define IIDC_MINOR_VER 2 + +enum iidc_event_type { + IIDC_EVENT_BEFORE_MTU_CHANGE, + IIDC_EVENT_AFTER_MTU_CHANGE, + IIDC_EVENT_BEFORE_TC_CHANGE, + IIDC_EVENT_AFTER_TC_CHANGE, + IIDC_EVENT_VF_RESET, + IIDC_EVENT_LINK_CHNG, + IIDC_EVENT_CRIT_ERR, + IIDC_EVENT_FAILOVER_START, + IIDC_EVENT_FAILOVER_FINISH, + IIDC_EVENT_NBITS /* must be last */ +}; + +enum iidc_reset_type { + IIDC_PFR, + IIDC_CORER, + IIDC_GLOBR, +}; + +#define IIDC_RDMA_INVALID_PORT 0xFF + +enum iidc_rdma_protocol { + IIDC_RDMA_PROTOCOL_IWARP = BIT(0), + IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1), +}; + +enum iidc_rdma_gen { + IIDC_RDMA_GEN_RESERVED = 0, + IIDC_RDMA_GEN_1 = 1, + IIDC_RDMA_GEN_2 = 2, + IIDC_RDMA_GEN_3 = 3, +}; + +struct iidc_rdma_caps { + u8 gen; /* Hardware generation */ + u8 protocols; /* bitmap of supported protocols */ +}; +/* This information is needed to handle auxiliary driver probe */ +struct iidc_ver_info { + u16 major; + u16 minor; + u64 support; +}; + +/* Struct to hold per DCB APP info */ +struct iidc_dcb_app_info { + u8 priority; + u8 selector; + u16 prot_id; +}; + +struct iidc_core_dev_info; + +#define IIDC_MAX_USER_PRIORITY 8 +#define IIDC_MAX_APPS 64 +#define IIDC_MAX_DSCP_MAPPING 64 +#define IIDC_VLAN_PFC_MODE 0x0 +#define IIDC_DSCP_PFC_MODE 0x1 + +/* Struct to hold per RDMA Qset info */ +struct iidc_rdma_qset_params { + u32 teid; /* qset TEID */ + u16 qs_handle; /* RDMA driver provides this */ + u16 vport_id; /* VSI index */ + u8 tc; /* TC branch the QSet should belong to */ +}; + +struct iidc_qos_info { + u64 tc_ctx; + u8 rel_bw; + u8 prio_type; + u8 egress_virt_up; + u8 ingress_virt_up; +}; + +/* Struct to hold QoS info */ +struct iidc_qos_params { + struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS]; + u8 up2tc[IIDC_MAX_USER_PRIORITY]; + u8 vport_relative_bw; + u8 vport_priority_type; + u32 num_apps; + u8 pfc_mode; + struct iidc_dcb_app_info apps[IIDC_MAX_APPS]; + u8 dscp_map[IIDC_MAX_DSCP_MAPPING]; + u8 num_tc; +}; + +union iidc_event_info { + /* IIDC_EVENT_AFTER_TC_CHANGE */ + struct iidc_qos_params port_qos; + /* IIDC_EVENT_LINK_CHNG */ + bool link_up; + /* IIDC_EVENT_VF_RESET */ + u32 vf_id; + /* IIDC_EVENT_CRIT_ERR */ + u32 reg; +}; + +struct iidc_event { + DECLARE_BITMAP(type, IIDC_EVENT_NBITS); + union iidc_event_info info; +}; + +/* RDMA queue vector map info */ +struct iidc_qv_info { + u32 v_idx; + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct iidc_qvlist_info { + u32 num_vectors; + struct iidc_qv_info qv_info[1]; +}; + +struct iidc_vf_port_info { + u16 vf_id; + u16 vport_id; + u16 port_vlan_id; + u16 port_vlan_tpid; +}; + +/* Following APIs are implemented by core PCI driver */ +struct iidc_core_ops { + /* APIs to allocate 
resources such as VEB, VSI, Doorbell queues, + * completion queues, Tx/Rx queues, etc... + */ + int (*alloc_res)(struct iidc_core_dev_info *cdev_info, + struct iidc_rdma_qset_params *qset); + int (*free_res)(struct iidc_core_dev_info *cdev_info, + struct iidc_rdma_qset_params *qset); + + int (*request_reset)(struct iidc_core_dev_info *cdev_info, + enum iidc_reset_type reset_type); + + int (*update_vport_filter)(struct iidc_core_dev_info *cdev_info, + u16 vport_id, bool enable); + int (*get_vf_info)(struct iidc_core_dev_info *cdev_info, u16 vf_id, + struct iidc_vf_port_info *vf_port_info); + int (*vc_send)(struct iidc_core_dev_info *cdev_info, u32 vf_id, u8 *msg, + u16 len); + int (*vc_send_sync)(struct iidc_core_dev_info *cdev_info, u8 *msg, + u16 len, u8 *recv_msg, u16 *recv_len); + int (*vc_queue_vec_map_unmap)(struct iidc_core_dev_info *cdev_info, + struct iidc_qvlist_info *qvl_info, + bool map); + int (*ieps_entry)(struct iidc_core_dev_info *obj, void *arg); +}; + +#define IIDC_RDMA_ROCE_NAME "roce" +#define IIDC_RDMA_IWARP_NAME "iwarp" +#define IIDC_RDMA_ID 0x00000010 +#define IIDC_IEPS_NAME "ieps" +#define IIDC_IEPS_ID 0x00000015 +#define IIDC_MAX_NUM_AUX 5 + +/* The const struct that instantiates cdev_info_id needs to be initialized + * in the .c with the macro ASSIGN_IIDC_INFO. + * For example: + * static const struct cdev_info_id cdev_info_ids[] = ASSIGN_IIDC_INFO; + */ +struct cdev_info_id { + char *name; + int id; +}; + +#define IIDC_RDMA_INFO { .name = IIDC_RDMA_ROCE_NAME, .id = IIDC_RDMA_ID }, +#define IIDC_IEPS_INFO { .name = IIDC_IEPS_NAME, .id = IIDC_IEPS_ID }, + +#define ASSIGN_IIDC_INFO \ +{ \ + IIDC_IEPS_INFO \ + IIDC_RDMA_INFO \ +} + +enum iidc_function_type { + IIDC_FUNCTION_TYPE_PF, + IIDC_FUNCTION_TYPE_VF, +}; + +/* Structure representing auxiliary driver tailored information about the core + * PCI dev, each auxiliary driver using the IIDC interface will have an + * instance of this struct dedicated to it. + */ +struct iidc_core_dev_info { + struct pci_dev *pdev; /* PCI device of corresponding to main function */ + struct auxiliary_device *adev; + /* KVA / Linear address corresponding to BAR0 of underlying + * pci_device. + */ + u8 __iomem *hw_addr; + int cdev_info_id; + struct iidc_ver_info ver; + + /* Opaque pointer for aux driver specific data tracking. This memory + * will be alloc'd and freed by the auxiliary driver and used for + * private data accessible only to the specific auxiliary driver. + * It is stored here so that when this struct is passed to the + * auxiliary driver via an IIDC call, the data can be accessed + * at that time. + */ + void *auxiliary_priv; + + enum iidc_function_type ftype; + u16 vport_id; + /* Current active RDMA protocol */ + enum iidc_rdma_protocol rdma_protocol; + + struct iidc_qos_params qos_info; + struct net_device *netdev; + + struct msix_entry *msix_entries; + u16 msix_count; /* How many vectors are reserved for this device */ + struct iidc_rdma_caps rdma_caps; + /* Following struct contains function pointers to be initialized + * by core PCI driver and called by auxiliary driver + */ + const struct iidc_core_ops *ops; + u8 pf_id; + u8 main_pf_port; + u8 rdma_active_port; +}; + +struct iidc_auxiliary_dev { + struct auxiliary_device adev; + struct iidc_core_dev_info *cdev_info; +}; + +/* structure representing the auxiliary driver. This struct is to be + * allocated and populated by the auxiliary driver's owner. The core PCI + * driver will access these ops by performing a container_of on the + * auxiliary_device->dev.driver. 
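As a brief illustrative aside (not part of the patch): the struct iidc_auxiliary_dev above also gives the opposite lookup direction, in which the auxiliary driver recovers the shared struct iidc_core_dev_info from its probe callback and performs the version check this header asks for. Only the iidc_* types and IIDC_MAJOR_VER/IIDC_MINOR_VER come from the header; the example_* name and the error handling are hypothetical.

static int example_aux_probe(struct auxiliary_device *adev,
			     const struct auxiliary_device_id *id)
{
	struct iidc_auxiliary_dev *iadev;
	struct iidc_core_dev_info *cdev_info;

	/* adev is embedded in struct iidc_auxiliary_dev, so container_of()
	 * recovers the wrapper and, from it, the shared core device info.
	 */
	iadev = container_of(adev, struct iidc_auxiliary_dev, adev);
	cdev_info = iadev->cdev_info;

	/* Fail probe when the core PCI driver speaks a different IIDC API */
	if (cdev_info->ver.major != IIDC_MAJOR_VER ||
	    cdev_info->ver.minor != IIDC_MINOR_VER) {
		dev_err(&adev->dev, "IIDC version mismatch: core %u.%u, auxiliary %u.%u\n",
			cdev_info->ver.major, cdev_info->ver.minor,
			IIDC_MAJOR_VER, IIDC_MINOR_VER);
		return -EINVAL;
	}

	/* ... normal auxiliary driver setup ... */
	return 0;
}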
+ */ +struct iidc_auxiliary_drv { + struct auxiliary_driver adrv; + /* This event_handler is meant to be a blocking call. For instance, + * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not + * return until the auxiliary driver is ready for the MTU change to + * happen. + */ + void (*event_handler)(struct iidc_core_dev_info *cdev_info, + struct iidc_event *event); + int (*vc_receive)(struct iidc_core_dev_info *cdev_info, u32 vf_id, + u8 *msg, u16 len); +}; + +#endif /* _IIDC_H_*/ diff --git a/drivers/thirdparty/ice/kcompat.c b/drivers/thirdparty/ice/kcompat.c index e5f8068c875c..5bf9ca4b3918 100644 --- a/drivers/thirdparty/ice/kcompat.c +++ b/drivers/thirdparty/ice/kcompat.c @@ -3,8 +3,6 @@ #include "kcompat.h" - - /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) #ifdef HAVE_FDB_OPS @@ -970,6 +968,47 @@ int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, #endif /* !(RHEL_RELEASE >= 7.3) */ #endif /* < 4.5.0 */ +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +int _kc_kstrtobool(const char *s, bool *res) +{ + if (!s) + return -EINVAL; + + switch (s[0]) { + case 'y': + case 'Y': + case '1': + *res = true; + return 0; + case 'n': + case 'N': + case '0': + *res = false; + return 0; + case 'o': + case 'O': + switch (s[1]) { + case 'n': + case 'N': + *res = true; + return 0; + case 'f': + case 'F': + *res = false; + return 0; + default: + break; + } + break; + default: + break; + } + + return -EINVAL; +} +#endif /* < 4.6.0 */ + /*****************************************************************************/ #if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ @@ -1004,6 +1043,10 @@ const char *_kc_phy_speed_to_str(int speed) #ifdef SPEED_100000 case SPEED_100000: return "100Gbps"; +#endif +#ifdef SPEED_200000 + case SPEED_200000: + return "200Gbps"; #endif case SPEED_UNKNOWN: return "Unknown"; @@ -1210,94 +1253,6 @@ void _kc_pcie_print_link_status(struct pci_dev *dev) { } #endif /* 4.17.0 */ -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) || (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,1))) -#ifdef HAVE_TC_SETUP_CLSFLOWER -#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ - const struct flow_match *__m = &(__rule)->match; \ - struct flow_dissector *__d = (__m)->dissector; \ - \ - (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ - (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ - -void flow_rule_match_basic(const struct flow_rule *rule, - struct flow_match_basic *out) -{ - FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); -} - -void flow_rule_match_control(const struct flow_rule *rule, - struct flow_match_control *out) -{ - FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); -} - -void flow_rule_match_eth_addrs(const struct flow_rule *rule, - struct flow_match_eth_addrs *out) -{ - FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); -} - -#ifdef HAVE_TC_FLOWER_ENC -void flow_rule_match_enc_keyid(const struct flow_rule *rule, - struct flow_match_enc_keyid *out) -{ - FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); -} - -void flow_rule_match_enc_ports(const struct flow_rule *rule, - struct flow_match_ports *out) -{ - 
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); -} - -void flow_rule_match_enc_control(const struct flow_rule *rule, - struct flow_match_control *out) -{ - FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); -} - -void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, - struct flow_match_ipv4_addrs *out) -{ - FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); -} - -void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, - struct flow_match_ipv6_addrs *out) -{ - FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); -} -#endif - -#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS -void flow_rule_match_vlan(const struct flow_rule *rule, - struct flow_match_vlan *out) -{ - FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); -} -#endif - -void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, - struct flow_match_ipv4_addrs *out) -{ - FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); -} - -void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, - struct flow_match_ipv6_addrs *out) -{ - FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); -} - -void flow_rule_match_ports(const struct flow_rule *rule, - struct flow_match_ports *out) -{ - FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); -} -#endif /* HAVE_TC_SETUP_CLSFLOWER */ -#endif /* 5.1.0 || (RHEL && RHEL < 8.1) */ - /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) #if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) @@ -1358,3 +1313,131 @@ u64 _kc_pci_get_dsn(struct pci_dev *dev) return dsn; } #endif /* 5.7.0 */ + +#ifdef NEED_DEVM_KASPRINTF +char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, + va_list ap) +{ + unsigned int len; + char *p; + va_list aq; + + va_copy(aq, ap); + len = vsnprintf(NULL, 0, fmt, aq); + va_end(aq); + + p = devm_kmalloc(dev, len + 1, gfp); + if (!p) + return NULL; + + vsnprintf(p, len + 1, fmt, ap); + + return p; +} + +char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) +{ + va_list ap; + char *p; + + va_start(ap, fmt); + p = devm_kvasprintf(dev, gfp, fmt, ap); + va_end(ap); + + return p; +} +#endif /* NEED_DEVM_KASPRINTF */ + +#ifdef NEED_PCI_IOV_VF_ID +/* + * Below function needs to access pci_sriov offset and stride. Since + * pci_sriov structure is defined in drivers/pci/pci.h which can not + * be included as linux kernel header file, the structure definition + * is not globally visible. + * As a result, one copy of structure definition is added. Since the + * definition is a copy, you need to make sure the kernel you want + * to backport must have exactly the same pci_sriov definition as the + * copy, otherwise you'll access wrong field offset and value. 
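To make the routing-ID arithmetic in _kc_pci_iov_vf_id() below concrete, here is a small worked example (illustrative only, not part of the patch; the bus/offset/stride numbers are made up). A VF's Routing ID is pf_rid + offset + vf_index * stride, and the helper simply inverts that relation, returning the same value as the upstream pci_iov_vf_id() it backports.

/*
 * Example: PF 0000:3b:00.0 has routing ID 0x3b00. With sriov->offset = 0x80
 * and sriov->stride = 1, its VFs start at routing ID 0x3b80 (0000:3b:10.0).
 * For the VF at 0000:3b:10.2 (routing ID 0x3b82) the helper returns
 * (0x3b82 - (0x3b00 + 0x80)) / 1 = 2.
 */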
+ */ + +/* Single Root I/O Virtualization */ +struct pci_sriov { + int pos; /* Capability position */ + int nres; /* Number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total_VFs; /* Total VFs associated with the PF */ + u16 initial_VFs; /* Initial VFs associated with the PF */ + u16 num_VFs; /* Number of VFs available */ + u16 offset; /* First VF Routing ID offset */ + u16 stride; /* Following VF stride */ + u16 vf_device; /* VF device ID */ + u32 pgsz; /* Page size for BAR alignment */ + u8 link; /* Function Dependency Link */ + u8 max_VF_buses; /* Max buses consumed by VFs */ + u16 driver_max_VFs; /* Max num VFs driver supports */ + struct pci_dev *dev; /* Lowest numbered PF */ + struct pci_dev *self; /* This PF */ + u32 cfg_size; /* VF config space size */ + u32 class; /* VF device */ + u8 hdr_type; /* VF header type */ + u16 subsystem_vendor; /* VF subsystem vendor */ + u16 subsystem_device; /* VF subsystem device */ + resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ + bool drivers_autoprobe; /* Auto probing of VFs by driver */ +}; + +int _kc_pci_iov_vf_id(struct pci_dev *dev) +{ + struct pci_dev *pf; + + if (!dev->is_virtfn) + return -EINVAL; + + pf = pci_physfn(dev); + return (((dev->bus->number << 8) + dev->devfn) - + ((pf->bus->number << 8) + pf->devfn + pf->sriov->offset)) / + pf->sriov->stride; +} +#endif /* NEED_PCI_IOV_VF_ID */ + +#ifdef NEED_MUL_U64_U64_DIV_U64 +u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) +{ + u64 res = 0, div, rem; + int shift; + + /* can a * b overflow ? */ + if (ilog2(a) + ilog2(b) > 62) { + /* + * (b * a) / c is equal to + * + * (b / c) * a + + * (b % c) * a / c + * + * if nothing overflows. Can the 1st multiplication + * overflow? Yes, but we do not care: this can only + * happen if the end result can't fit in u64 anyway. + * + * So the code below does + * + * res = (b / c) * a; + * b = b % c; + */ + div = div64_u64_rem(b, c, &rem); + res = div * a; + b = rem; + + shift = ilog2(a) + ilog2(b) - 62; + if (shift > 0) { + /* drop precision */ + b >>= shift; + c >>= shift; + if (!c) + return res; + } + } + + return res + div64_u64(a * b, c); +} +#endif /* NEED_MUL_U64_U64_DIV_U64 */ diff --git a/drivers/thirdparty/ice/kcompat.h b/drivers/thirdparty/ice/kcompat.h index 6549382150a6..b4d4c74af512 100644 --- a/drivers/thirdparty/ice/kcompat.h +++ b/drivers/thirdparty/ice/kcompat.h @@ -4,6 +4,7 @@ #ifndef _KCOMPAT_H_ #define _KCOMPAT_H_ +#include "kcompat_gcc.h" #ifndef LINUX_VERSION_CODE #include #else @@ -74,11 +75,9 @@ #endif #endif - #define adapter_struct ice_pf #define adapter_q_vector ice_q_vector - /* Dynamic LTR and deeper C-State support disable/enable */ /* packet split disable/enable */ @@ -131,7 +130,6 @@ struct msix_entry { #define _Bool char #endif - #undef __always_unused #define __always_unused __attribute__((__unused__)) @@ -370,7 +368,6 @@ struct _kc_vlan_hdr { #define PCI_EXP_LNKSTA_NLW_X8 0x0080 #endif - #ifndef __GFP_COLD #define __GFP_COLD 0 #endif @@ -738,6 +735,9 @@ struct _kc_ethtool_pauseparam { #ifndef SPEED_100000 #define SPEED_100000 100000 #endif +#ifndef SPEED_200000 +#define SPEED_200000 200000 +#endif #ifndef RHEL_RELEASE_VERSION #define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) @@ -929,23 +929,8 @@ struct _kc_ethtool_pauseparam { #define SLE_LOCALVERSION_CODE 0 #endif /* SLE_LOCALVERSION_CODE */ -/* - * Include the definitions file for HAVE/NEED flags for the standard upstream - * kernels. 
- * - * Then, based on the distribution we detect, load the distribution specific - * definitions file that customizes the definitions for the target - * distribution. - */ -#include "kcompat_std_defs.h" - -#ifdef CONFIG_SUSE_KERNEL -#include "kcompat_sles_defs.h" -#elif UBUNTU_VERSION_CODE -#include "kcompat_ubuntu_defs.h" -#elif RHEL_RELEASE_CODE -#include "kcompat_rhel_defs.h" -#endif +/* Include definitions from the new kcompat layout */ +#include "kcompat_defs.h" /* * ADQ depends on __TC_MQPRIO_MODE_MAX and related kernel code @@ -965,7 +950,6 @@ struct _kc_ethtool_pauseparam { #ifdef __KLOCWORK__ - */ #ifdef ARRAY_SIZE #undef ARRAY_SIZE #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) @@ -1013,7 +997,6 @@ static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) #define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg) #endif /* CONFIG_DYNAMIC_DEBUG */ - #undef hlist_for_each_entry_safe #define hlist_for_each_entry_safe(pos, n, head, member) \ for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \ @@ -1062,7 +1045,6 @@ __x < __y ? __x : __y; \ #endif /* min_t */ #endif /* __KLOCWORK__ */ - /* Older versions of GCC will trigger -Wformat-nonliteral warnings for const * char * strings. Unfortunately, the implementation of do_trace_printk does * this, in order to add a storage attribute to the memory. This was fixed in @@ -2282,6 +2264,11 @@ static inline void page_ref_inc(struct page *page) #define HAVE_TC_SETUP_CLSFLOWER #endif +#ifndef kstrtobool +#define kstrtobool _kc_kstrtobool +int _kc_kstrtobool(const char *s, bool *res); +#endif + #else /* >= 4.6.0 */ #define HAVE_PAGE_COUNT_BULK_UPDATE #define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC @@ -2388,11 +2375,6 @@ pci_release_mem_regions(struct pci_dev *pdev) #define HAVE_ETHTOOL_NEW_1G_BITS #define HAVE_ETHTOOL_NEW_10G_BITS #endif /* RHEL7.4+ */ -#if (!(SLE_VERSION_CODE) && !(RHEL_RELEASE_CODE)) || \ - SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0)) || \ - RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)) -#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a) -#endif /* !SLE_VERSION_CODE && !RHEL_RELEASE_CODE || (SLES <= 12.3.0) || (RHEL <= 7.5) */ #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4)) static inline void bitmap_from_u64(unsigned long *dst, u64 mask) { @@ -2622,7 +2604,6 @@ static inline void _kc_dev_consume_skb_any(struct sk_buff *skb) } while (0) #endif /* !NL_SET_ERR_MSG_MOD */ #else /* >= 4.12 */ -#define HAVE_NAPI_BUSY_LOOP #define HAVE_MIN_NAPI_ID #endif /* 4.12 */ @@ -2837,7 +2818,6 @@ void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, struct ethtool_link_ksettings *src); #define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks #else /* >= 4.15 */ -#define HAVE_NDO_BPF #define HAVE_XDP_BUFF_DATA_META #define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO #define HAVE_TCF_BLOCK @@ -2897,10 +2877,14 @@ static inline unsigned long _kc_array_index_mask_nospec(unsigned long index, #ifndef sizeof_field #define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER))) #endif /* sizeof_field */ +/* add a check for the Oracle UEK 4.14.35 kernel as + * it backported a version of this bitmap function + */ #if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ - !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ - SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ - SLE_VERSION_CODE >= SLE_VERSION(15,1,0)) + !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ + SLE_VERSION_CODE >= 
SLE_VERSION(15,1,0)) && \ + !(LINUX_VERSION_CODE == KERNEL_VERSION(4,14,35)) /* * Copy bitmap and clear tail bits in last word. */ @@ -2926,7 +2910,6 @@ void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits #endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ #else /* >= 4.16 */ #include -#define HAVE_XDP_BUFF_RXQ #define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK #define HAVE_TCF_MIRRED_DEV #define HAVE_VF_STATS_DROPPED @@ -2961,15 +2944,10 @@ void _kc_pcie_print_link_status(struct pci_dev *dev); #define HAVE_XDP_SOCK #define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS #define NO_NDO_XDP_FLUSH -#define HAVE_AF_XDP_SUPPORT #endif /* 4.18.0 */ /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) -#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ - (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(8,2))) -#define HAVE_DEVLINK_REGIONS -#endif /* RHEL >= 8.0 && RHEL <= 8.2 */ #define bitmap_alloc(nbits, flags) \ kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) #define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO)) @@ -3005,36 +2983,11 @@ static inline void __kc_metadata_dst_free(void *md_dst) #define HAVE_NETDEV_SB_DEV #define HAVE_TCF_VLAN_TPID #define HAVE_RHASHTABLE_TYPES -#define HAVE_DEVLINK_REGIONS -#define HAVE_DEVLINK_PARAMS #endif /* 4.19.0 */ /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) #define HAVE_XDP_UMEM_PROPS -#ifdef HAVE_AF_XDP_SUPPORT -#ifndef napi_if_scheduled_mark_missed -static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n) -{ - unsigned long val, new; - - do { - val = READ_ONCE(n->state); - if (val & NAPIF_STATE_DISABLE) - return true; - - if (!(val & NAPIF_STATE_SCHED)) - return false; - - new = val | NAPIF_STATE_MISSED; - } while (cmpxchg(&n->state, val, new) != val); - - return true; -} - -#define napi_if_scheduled_mark_missed __kc_napi_if_scheduled_mark_missed -#endif /* !napi_if_scheduled_mark_missed */ -#endif /* HAVE_AF_XDP_SUPPORT */ #if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))) #define HAVE_DEVLINK_ESWITCH_OPS_EXTACK #endif /* RHEL >= 8.0 */ @@ -3046,7 +2999,6 @@ static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n) #else /* >= 4.20.0 */ #define HAVE_DEVLINK_ESWITCH_OPS_EXTACK #define HAVE_AF_XDP_ZC_SUPPORT -#define HAVE_VXLAN_TYPE #define HAVE_ETF_SUPPORT /* Earliest TxTime First */ #endif /* 4.20.0 */ @@ -3110,7 +3062,6 @@ ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) #if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) #define HAVE_TC_INDIR_BLOCK #endif /* RHEL 8.2 */ -#define INDIRECT_CALLABLE_DECLARE(x) x #else /* >= 5.0.0 */ #define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL #define HAVE_PTP_CLOCK_INFO_GETTIMEX64 @@ -3118,7 +3069,6 @@ ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) #define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM #define HAVE_GENEVE_TYPE #define HAVE_TC_INDIR_BLOCK -#define HAVE_INDIRECT_CALL_WRAPPER_HEADER #endif /* 5.0.0 */ /*****************************************************************************/ @@ -3128,114 +3078,14 @@ ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) #define HAVE_NDO_FDB_ADD_EXTACK #define HAVE_DEVLINK_INFO_GET #define HAVE_DEVLINK_FLASH_UPDATE -#else /* RHEL < 8.1 */ -#ifdef HAVE_TC_SETUP_CLSFLOWER -#include - -struct 
flow_match { - struct flow_dissector *dissector; - void *mask; - void *key; -}; - -struct flow_match_basic { - struct flow_dissector_key_basic *key, *mask; -}; - -struct flow_match_control { - struct flow_dissector_key_control *key, *mask; -}; - -struct flow_match_eth_addrs { - struct flow_dissector_key_eth_addrs *key, *mask; -}; - -#ifdef HAVE_TC_FLOWER_ENC -struct flow_match_enc_keyid { - struct flow_dissector_key_keyid *key, *mask; -}; -#endif - -#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS -struct flow_match_vlan { - struct flow_dissector_key_vlan *key, *mask; -}; -#endif - -struct flow_match_ipv4_addrs { - struct flow_dissector_key_ipv4_addrs *key, *mask; -}; - -struct flow_match_ipv6_addrs { - struct flow_dissector_key_ipv6_addrs *key, *mask; -}; - -struct flow_match_ports { - struct flow_dissector_key_ports *key, *mask; -}; - -struct flow_rule { - struct flow_match match; -}; - -void flow_rule_match_basic(const struct flow_rule *rule, - struct flow_match_basic *out); -void flow_rule_match_control(const struct flow_rule *rule, - struct flow_match_control *out); -void flow_rule_match_eth_addrs(const struct flow_rule *rule, - struct flow_match_eth_addrs *out); -#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS -void flow_rule_match_vlan(const struct flow_rule *rule, - struct flow_match_vlan *out); -#endif -void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, - struct flow_match_ipv4_addrs *out); -void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, - struct flow_match_ipv6_addrs *out); -void flow_rule_match_ports(const struct flow_rule *rule, - struct flow_match_ports *out); -#ifdef HAVE_TC_FLOWER_ENC -void flow_rule_match_enc_ports(const struct flow_rule *rule, - struct flow_match_ports *out); -void flow_rule_match_enc_control(const struct flow_rule *rule, - struct flow_match_control *out); -void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, - struct flow_match_ipv4_addrs *out); -void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, - struct flow_match_ipv6_addrs *out); -void flow_rule_match_enc_keyid(const struct flow_rule *rule, - struct flow_match_enc_keyid *out); -#endif - -static inline struct flow_rule * -tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) -{ - return (struct flow_rule *)&tc_flow_cmd->dissector; -} - -static inline bool flow_rule_match_key(const struct flow_rule *rule, - enum flow_dissector_key_id key) -{ - return dissector_uses_key(rule->match.dissector, key); -} -#endif /* HAVE_TC_SETUP_CLSFLOWER */ - #endif /* RHEL < 8.1 */ - -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) -#define devlink_params_publish(devlink) do { } while (0) -#define devlink_params_unpublish(devlink) do { } while (0) -#endif - #else /* >= 5.1.0 */ #define HAVE_NDO_FDB_ADD_EXTACK #define NO_XDP_QUERY_XSK_UMEM #define HAVE_AF_XDP_NETDEV_UMEM #define HAVE_TC_FLOW_RULE_INFRASTRUCTURE -#define HAVE_TC_FLOWER_ENC_IP #define HAVE_DEVLINK_INFO_GET #define HAVE_DEVLINK_FLASH_UPDATE -#define HAVE_DEVLINK_PORT_PARAMS #endif /* 5.1.0 */ /*****************************************************************************/ @@ -3371,10 +3221,10 @@ static inline void _kc_bitmap_set_value8(unsigned long *map, /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)) -#ifdef HAVE_AF_XDP_SUPPORT +#ifdef HAVE_AF_XDP_ZC_SUPPORT #define xsk_umem_release_addr xsk_umem_discard_addr #define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq -#endif /* HAVE_AF_XDP_SUPPORT */ 
+#endif /* HAVE_AF_XDP_ZC_SUPPORT */ #if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3)) || \ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) #define HAVE_TX_TIMEOUT_TXQUEUE @@ -3387,22 +3237,22 @@ static inline void _kc_bitmap_set_value8(unsigned long *map, #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) u64 _kc_pci_get_dsn(struct pci_dev *dev); #define pci_get_dsn(dev) _kc_pci_get_dsn(dev) -#if !(SLE_VERSION_CODE > SLE_VERSION(15,2,0)) && \ +/* add a check for the Oracle UEK 5.4.17 kernel which + * backported the rename of the aer functions + */ +#if defined(NEED_ORCL_LIN_PCI_AER_CLEAR_NONFATAL_STATUS) || \ +!(SLE_VERSION_CODE > SLE_VERSION(15, 2, 0)) && \ !((LINUX_VERSION_CODE == KERNEL_VERSION(5,3,18)) && \ - (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(14,0,0))) && \ +(SLE_LOCALVERSION_CODE >= KERNEL_VERSION(14, 0, 0))) && \ + !(LINUX_VERSION_CODE == KERNEL_VERSION(5,4,17)) && \ !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) #define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status #endif -#define cpu_latency_qos_update_request pm_qos_update_request -#define cpu_latency_qos_add_request(arg1, arg2) pm_qos_add_request(arg1, PM_QOS_CPU_DMA_LATENCY, arg2) -#define cpu_latency_qos_remove_request pm_qos_remove_request - #ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID #define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id" #endif #else /* >= 5.7.0 */ -#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT #define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT #endif /* 5.7.0 */ @@ -3410,25 +3260,14 @@ u64 _kc_pci_get_dsn(struct pci_dev *dev); #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) #if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) && \ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +/* (RHEL < 8.4) || (SLE < 15.3) */ #define xdp_convert_buff_to_frame convert_to_xdp_frame -#endif /* (RHEL < 8.4) || (SLE < 15.3) */ +#elif (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +/* RHEL >= 8.4 */ +#define HAVE_XDP_BUFF_FRAME_SZ +#endif #define flex_array_size(p, member, count) \ array_size(count, sizeof(*(p)->member) + __must_be_array((p)->member)) -#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) -#ifdef HAVE_AF_XDP_ZC_SUPPORT -#ifndef xsk_umem_get_rx_frame_size -static inline u32 _xsk_umem_get_rx_frame_size(struct xdp_umem *umem) -{ - return umem->chunk_size_nohr - XDP_PACKET_HEADROOM; -} - -#define xsk_umem_get_rx_frame_size _xsk_umem_get_rx_frame_size -#endif /* xsk_umem_get_rx_frame_size */ -#endif /* HAVE_AF_XDP_ZC_SUPPORT */ -#else /* SLE >= 15.3 */ -#define HAVE_XDP_BUFF_FRAME_SZ -#define HAVE_MEM_TYPE_XSK_BUFF_POOL -#endif /* SLE >= 15.3 */ #else /* >= 5.8.0 */ #define HAVE_TC_FLOW_INDIR_DEV #define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP @@ -3461,62 +3300,12 @@ static inline u32 _xsk_umem_get_rx_frame_size(struct xdp_umem *umem) #endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ /*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) -#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) -#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS -#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS -#else /* SLE >= 15.3 */ -struct devlink_flash_update_params { - const char *file_name; - const char *component; - u32 overwrite_mask; -}; - -#ifndef DEVLINK_FLASH_OVERWRITE_SETTINGS -#define DEVLINK_FLASH_OVERWRITE_SETTINGS BIT(0) -#endif - -#ifndef 
DEVLINK_FLASH_OVERWRITE_IDENTIFIERS -#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS BIT(1) -#endif -#endif /* !(SLE >= 15.3) */ - -#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0)))) -#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM -#define xsk_get_pool_from_qid xdp_get_umem_from_qid -#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size -#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info -#define xsk_pool_dma_unmap xsk_buff_dma_unmap -#define xsk_pool_dma_map xsk_buff_dma_map -#define xsk_tx_peek_desc xsk_umem_consume_tx -#define xsk_tx_release xsk_umem_consume_tx_done -#define xsk_tx_completed xsk_umem_complete_tx -#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup -#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL -#include -static inline void -_kc_xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, - void __always_unused *pool) -{ - xsk_buff_dma_sync_for_cpu(xdp); -} - -#define xsk_buff_dma_sync_for_cpu(xdp, pool) \ - _kc_xsk_buff_dma_sync_for_cpu(xdp, pool) -#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ -#else /* SLE >= 15.3 */ -#define HAVE_NETDEV_BPF_XSK_POOL -#endif /* SLE >= 15.3 */ -#else /* >= 5.10.0 */ -#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS -#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS -#define HAVE_NETDEV_BPF_XSK_POOL -#endif /* 5.10.0 */ - -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) -#ifdef HAVE_XDP_BUFF_RXQ +#ifdef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#ifdef HAVE_XDP_BUFF_IN_XDP_H #include +#else +#include +#endif /* HAVE_XDP_BUFF_IN_XDP_H */ static inline int _kc_xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, u32 queue_index, unsigned int __always_unused napi_id) @@ -3526,7 +3315,8 @@ _kc_xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, #define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) \ _kc_xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) -#endif /* HAVE_XDP_BUFF_RXQ */ +#endif /* HAVE_XDP_RXQ_INFO_REG_3_PARAMS */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) #ifdef HAVE_NAPI_BUSY_LOOP #ifdef CONFIG_NET_RX_BUSY_POLL #include @@ -3543,10 +3333,14 @@ _kc_napi_busy_loop(unsigned int napi_id, _kc_napi_busy_loop(napi_id, loop_end, loop_end_arg, prefer_busy_poll, budget) #endif /* CONFIG_NET_RX_BUSY_POLL */ #endif /* HAVE_NAPI_BUSY_LOOP */ -#define HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY -#else /* >= 5.11.0 */ -#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW -#endif /* 5.11.0 */ +#endif /* <5.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,12,0)) +#define HAVE_GRO_HEADER +#endif /* >=5.12.0 */ + +/*****************************************************************************/ /* * Load the implementations file which actually defines kcompat backports. diff --git a/drivers/thirdparty/ice/kcompat_defs.h b/drivers/thirdparty/ice/kcompat_defs.h new file mode 100644 index 000000000000..45adcb45b15b --- /dev/null +++ b/drivers/thirdparty/ice/kcompat_defs.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _KCOMPAT_DEFS_H_ +#define _KCOMPAT_DEFS_H_ + +#ifndef LINUX_VERSION_CODE +#include +#else +#ifndef KERNEL_VERSION +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#endif /* LINUX_VERSION_CODE */ + +#ifndef UTS_RELEASE +#include +#endif + +/* + * Include the definitions file for HAVE/NEED flags for the standard upstream + * kernels. 
+ * + * Then, based on the distribution we detect, load the distribution specific + * definitions file that customizes the definitions for the target + * distribution. + */ +#include "kcompat_std_defs.h" + +#ifdef CONFIG_SUSE_KERNEL +#include "kcompat_sles_defs.h" +#elif UBUNTU_VERSION_CODE +#include "kcompat_ubuntu_defs.h" +#elif RHEL_RELEASE_CODE +#include "kcompat_rhel_defs.h" +#elif defined(UEK_RELEASE_NUMBER) +#include "kcompat_oracle_defs.h" +#endif + +#endif /* _KCOMPAT_DEFS_H_ */ diff --git a/drivers/thirdparty/ice/kcompat_gcc.h b/drivers/thirdparty/ice/kcompat_gcc.h new file mode 100644 index 000000000000..0112c19902b6 --- /dev/null +++ b/drivers/thirdparty/ice/kcompat_gcc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _KCOMPAT_GCC_H_ +#define _KCOMPAT_GCC_H_ + +#ifdef __has_attribute +#if __has_attribute(__fallthrough__) +# define fallthrough __attribute__((__fallthrough__)) +#else +# define fallthrough do {} while (0) /* fallthrough */ +#endif /* __has_attribute(fallthrough) */ +#else +# define fallthrough do {} while (0) /* fallthrough */ +#endif /* __has_attribute */ + +#endif /* _KCOMPAT_GCC_H_ */ diff --git a/drivers/thirdparty/ice/kcompat_impl.h b/drivers/thirdparty/ice/kcompat_impl.h index 266415ca7a96..8152460164aa 100644 --- a/drivers/thirdparty/ice/kcompat_impl.h +++ b/drivers/thirdparty/ice/kcompat_impl.h @@ -17,6 +17,75 @@ * generic network stack functions */ +/* NEED_NETDEV_TXQ_BQL_PREFETCH + * + * functions + * netdev_txq_bql_complete_prefetchw() + * netdev_txq_bql_enqueue_prefetchw() + * + * were added in kernel 4.20 upstream commit + * 535114539bb2 ("net: add netdev_txq_bql_{enqueue, complete}_prefetchw() + * helpers") + */ +#ifdef NEED_NETDEV_TXQ_BQL_PREFETCH +/** + * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their ndo_start_xmit(), + * to give appropriate hint to the CPU. + */ +static inline +void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.num_queued); +#endif +} + +/** + * netdev_txq_bql_complete_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their TX completion path, + * to give appropriate hint to the CPU. + */ +static inline +void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.limit); +#endif +} +#endif /* NEED_NETDEV_TXQ_BQL_PREFETCH */ + +/* NEED_NETDEV_TX_SENT_QUEUE + * + * __netdev_tx_sent_queue was added in kernel 4.20 upstream commit + * 3e59020abf0f ("net: bql: add __netdev_tx_sent_queue()") + */ +#ifdef NEED_NETDEV_TX_SENT_QUEUE +/* Variant of netdev_tx_sent_queue() for drivers that are aware + * that they should not test BQL status themselves. + * We do want to change __QUEUE_STATE_STACK_XOFF only for the last + * skb of a batch. + * Returns true if the doorbell must be used to kick the NIC. 
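For context, a sketch of the intended call pattern for the helper defined just below (illustrative only, not part of the patch): everything except __netdev_tx_sent_queue() itself, including the tail-register write, is hypothetical.

static void example_tx_tail_bump(struct netdev_queue *txq,
				 void __iomem *tail_reg,
				 unsigned int bytes, bool xmit_more)
{
	/* With xmit_more set, only the byte count is queued and the doorbell
	 * is skipped unless the stack has already stopped the queue; the
	 * final skb of a batch (xmit_more == false) always kicks the NIC.
	 */
	if (__netdev_tx_sent_queue(txq, bytes, xmit_more))
		writel(0x1, tail_reg);	/* hypothetical doorbell write */
}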
+ */ +static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes, + bool xmit_more) +{ + if (xmit_more) { +#ifdef CONFIG_BQL + dql_queued(&dev_queue->dql, bytes); +#endif + return netif_tx_queue_stopped(dev_queue); + } + netdev_tx_sent_queue(dev_queue, bytes); + return true; +} +#endif /* NEED_NETDEV_TX_SENT_QUEUE */ + /* NEED_NET_PREFETCH * * net_prefetch was introduced by commit f468f21b7af0 ("net: Take common @@ -212,6 +281,22 @@ devlink_flash_update_status_notify(struct devlink __always_unused *devlink, } #endif /* NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY */ +#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS +struct devlink_flash_update_params { + const char *file_name; + const char *component; + u32 overwrite_mask; +}; + +#ifndef DEVLINK_FLASH_OVERWRITE_SETTINGS +#define DEVLINK_FLASH_OVERWRITE_SETTINGS BIT(0) +#endif + +#ifndef DEVLINK_FLASH_OVERWRITE_IDENTIFIERS +#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS BIT(1) +#endif +#endif /* !HAVE_DEVLINK_FLASH_UPDATE_PARAMS */ + /* NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY * * devlink_flash_update_timeout_notify was added by upstream commit @@ -323,6 +408,36 @@ _kc_devlink_port_attrs_set(struct devlink_port *devlink_port, #endif /* NEED_DEVLINK_PORT_ATTRS_SET_STRUCT */ +/* + * NEED_DEVLINK_ALLOC_SETS_DEV + * + * Since commit 919d13a7e455 ("devlink: Set device as early as possible"), the + * devlink device pointer is set by devlink_alloc instead of by + * devlink_register. + * + * devlink_alloc now includes the device pointer in its signature, while + * devlink_register no longer includes it. + * + * This implementation provides a replacement for devlink_alloc which will + * take and then silently discard the extra dev pointer. + * + * To use devlink_register, drivers must check + * HAVE_DEVLINK_REGISTER_SETS_DEV. Note that we can't easily provide + * a backport of the change to devlink_register directly. Although the dev + * pointer is accessible from the devlink pointer through the driver private + * section, it is device driver specific and is not easily accessible in + * compat code. + */ +#ifdef NEED_DEVLINK_ALLOC_SETS_DEV +static inline struct devlink * +_kc_devlink_alloc(const struct devlink_ops *ops, size_t priv_size, + struct device * __always_unused dev) +{ + return devlink_alloc(ops, priv_size); +} +#define devlink_alloc _kc_devlink_alloc +#endif /* NEED_DEVLINK_ALLOC_SETS_DEV */ + #endif /* CONFIG_NET_DEVLINK */ #ifdef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE @@ -495,4 +610,762 @@ static inline struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns) #endif /* NEED_CONVERT_ART_NS_TO_TSC */ #endif /* HAVE_PTP_CROSSTIMESTAMP */ +/* + * PTP functions and definitions + */ +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#include +#include + +/* PTP_* ioctl flags + * + * PTP_PEROUT_ONE_SHOT and PTP_PEROUT_DUTY_CYCLE were added by commit + * f65b71aa25a6 ("ptp: add ability to configure duty cycle for periodic + * output") + * + * PTP_PEROUT_PHASE was added in commit b6bd41363a1c ("ptp: introduce + * a phase offset in the periodic output request") + * + * PTP_STRICT_FLAGS was added in commit 6138e687c7b6 ("ptp: Introduce strict + * checking of external time stamp options.") + * + * These flags control behavior for the periodic output PTP ioctl. For older + * kernels, we define the flags as 0. This allows bitmask checks on flags to + * work as expected, since these feature flags will become no-ops on kernels + * that lack support. 
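A short sketch of that bitmask pattern (illustrative only, not part of the patch; the example_* name is hypothetical) for a periodic-output .enable callback:

static int example_ptp_enable(struct ptp_clock_info *info,
			      struct ptp_clock_request *rq, int on)
{
	if (rq->type != PTP_CLK_REQ_PEROUT)
		return -EOPNOTSUPP;

	/* On kernels that predate these flags they are defined to 0 above,
	 * so this mask test compiles everywhere and simply never triggers
	 * where the feature cannot be requested in the first place.
	 */
	if (rq->perout.flags & (PTP_PEROUT_PHASE | PTP_PEROUT_DUTY_CYCLE))
		return -EOPNOTSUPP;

	/* ... program or disable the periodic output based on 'on' ... */
	return 0;
}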
+ * + * Drivers can check if the relevant feature is actually supported by using an + * '#if' on the flag instead of an '#ifdef' + */ +#ifndef PTP_PEROUT_PHASE +#define PTP_PEROUT_PHASE 0 +#endif + +#ifndef PTP_PEROUT_DUTY_CYCLE +#define PTP_PEROUT_DUTY_CYCLE 0 +#endif + +#ifndef PTP_STRICT_FLAGS +#define PTP_STRICT_FLAGS 0 +#endif + +#ifndef PTP_PEROUT_PHASE +/* PTP_PEROUT_PHASE + * + * The PTP_PEROUT_PHASE flag was added in commit b6bd41363a1c ("ptp: introduce + * a phase offset in the periodic output request") as a way for userspace to + * request a phase-offset periodic output that starts on some arbitrary + * multiple of the clock period. + * + * For older kernels, define this flag to 0 so that checks for if it is + * enabled will always fail. Drivers should use '#if PTP_PEROUT_PHASE' to + * determine if the kernel has phase support, and use the flag as normal for + * checking supported flags or if the flag is enabled for a given request. + */ +#define PTP_PEROUT_PHASE 0 +#endif + +#endif /* CONFIG_PTP_1588_CLOCK */ + +#ifdef NEED_BUS_FIND_DEVICE_CONST_DATA +/* NEED_BUS_FIND_DEVICE_CONST_DATA + * + * bus_find_device() was updated in upstream commit 418e3ea157ef + * ("bus_find_device: Unify the match callback with class_find_device") + * to take a const void *data parameter and also have the match() function + * passed in take a const void *data parameter. + * + * all of the kcompat below makes it so the caller can always just call + * bus_find_device() according to the upstream kernel without having to worry + * about const vs. non-const arguments. + */ +struct _kc_bus_find_device_custom_data { + const void *real_data; + int (*real_match)(struct device *dev, const void *data); +}; + +static inline int _kc_bus_find_device_wrapped_match(struct device *dev, void *data) +{ + struct _kc_bus_find_device_custom_data *custom_data = data; + + return custom_data->real_match(dev, custom_data->real_data); +} + +static inline struct device * +_kc_bus_find_device(struct bus_type *type, struct device *start, + const void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct _kc_bus_find_device_custom_data custom_data = {}; + + custom_data.real_data = data; + custom_data.real_match = match; + + return bus_find_device(type, start, &custom_data, + _kc_bus_find_device_wrapped_match); +} + +/* force callers of bus_find_device() to call _kc_bus_find_device() on kernels + * where NEED_BUS_FIND_DEVICE_CONST_DATA is defined + */ +#define bus_find_device(type, start, data, match) \ + _kc_bus_find_device(type, start, data, match) +#endif /* NEED_BUS_FIND_DEVICE_CONST_DATA */ + +#ifdef NEED_DEV_PM_DOMAIN_ATTACH_DETACH +#include +/* NEED_DEV_PM_DOMAIN_ATTACH_DETACH + * + * dev_pm_domain_attach() and dev_pm_domain_detach() were added in upstream + * commit 46420dd73b80 ("PM / Domains: Add APIs to attach/detach a PM domain for + * a device"). To support older kernels and OSVs that don't have these API, just + * implement how older versions worked by directly calling acpi_dev_pm_attach() + * and acpi_dev_pm_detach(). 
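An illustrative caller pairing (not part of the patch; the example_* names are hypothetical): probe attaches, remove detaches, and only a probe deferral is treated as fatal, which is a commonly used, version-tolerant way to consume this API.

static int example_probe(struct device *dev)
{
	int ret;

	/* With the backport below this attaches the ACPI PM domain when one
	 * exists and is a no-op otherwise.
	 */
	ret = dev_pm_domain_attach(dev, true);
	if (ret == -EPROBE_DEFER)
		return ret;

	/* ... rest of probe ... */
	return 0;
}

static void example_remove(struct device *dev)
{
	dev_pm_domain_detach(dev, true);
}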
+ */ +static inline int dev_pm_domain_attach(struct device *dev, bool power_on) +{ + if (dev->pm_domain) + return 0; + + if (ACPI_HANDLE(dev)) + return acpi_dev_pm_attach(dev, true); + + return 0; +} + +static inline void dev_pm_domain_detach(struct device *dev, bool power_off) +{ + if (ACPI_HANDLE(dev)) + acpi_dev_pm_detach(dev, true); +} +#endif /* NEED_DEV_PM_DOMAIN_ATTACH_DETACH */ + +#ifdef NEED_CPU_LATENCY_QOS_RENAME +/* NEED_CPU_LATENCY_QOS_RENAME + * + * The PM_QOS_CPU_DMA_LATENCY definition was removed in 67b06ba01857 ("PM: + * QoS: Drop PM_QOS_CPU_DMA_LATENCY and rename related functions"). The + * related functions were renamed to use "cpu_latency_qos_" prefix. + * + * Use wrapper functions to map the new API onto the API available in older + * kernels. + */ +#include <linux/pm_qos.h> +static inline void +cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value) +{ + pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, value); +} + +static inline void +cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value) +{ + pm_qos_update_request(req, new_value); +} + +static inline void +cpu_latency_qos_remove_request(struct pm_qos_request *req) +{ + pm_qos_remove_request(req); +} +#endif /* NEED_CPU_LATENCY_QOS_RENAME */ + +#ifdef NEED_DECLARE_STATIC_KEY_FALSE +/* NEED_DECLARE_STATIC_KEY_FALSE + * + * DECLARE_STATIC_KEY_FALSE was added by upstream commit + * 525e0ac4d2b2 ("locking/static_keys: Provide DECLARE and + * well as DEFINE macros") + * + * The definition is now necessary to handle + * the xdpdrv work with more than 64 cpus + */ +#define DECLARE_STATIC_KEY_FALSE(name) \ + extern struct static_key_false name +#endif /* NEED_DECLARE_STATIC_KEY_FALSE */ + +#ifdef NEED_DEFINE_STATIC_KEY_FALSE +/* NEED_DEFINE_STATIC_KEY_FALSE + * + * DEFINE_STATIC_KEY_FALSE was added by upstream commit + * 11276d5306b8 ("locking/static_keys: Add a new + * static_key interface") + * + * The definition is now necessary to handle + * the xdpdrv work with more than 64 cpus + */ +#define DECLARE_STATIC_KEY_FALSE(name) extern struct static_key name + +#define DEFINE_STATIC_KEY_FALSE(name) \ + struct static_key name = STATIC_KEY_INIT_FALSE +#endif /* NEED_DEFINE_STATIC_KEY_FALSE */ + +#ifdef NEED_STATIC_BRANCH +/* NEED_STATIC_BRANCH + * + * static_branch_likely, static_branch_unlikely, + * static_branch_inc, static_branch_dec were added by upstream commit + * 11276d5306b8 ("locking/static_keys: Add a new + * static_key interface") + * + * The definition is now necessary to handle + * the xdpdrv work with more than 64 cpus + */ +#define static_branch_likely(x) likely(static_key_enabled(x)) +#define static_branch_unlikely(x) unlikely(static_key_enabled(x)) + +#define static_branch_inc(x) static_key_slow_inc(x) +#define static_branch_dec(x) static_key_slow_dec(x) + +#endif /* NEED_STATIC_BRANCH */ + +#ifdef NEED_NETDEV_XDP_STRUCT +#define netdev_bpf netdev_xdp +#endif /* NEED_NETDEV_XDP_STRUCT */ + +#ifdef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#ifdef HAVE_XDP_SUPPORT +#include <linux/filter.h> +static inline void +_kc_bpf_warn_invalid_xdp_action(__maybe_unused struct net_device *dev, + __maybe_unused struct bpf_prog *prog, u32 act) +{ + bpf_warn_invalid_xdp_action(act); +} + +#define bpf_warn_invalid_xdp_action(dev, prog, act) \ + _kc_bpf_warn_invalid_xdp_action(dev, prog, act) +#endif /* HAVE_XDP_SUPPORT */ +#endif /* NEED_NO_NETDEV_PROG_XDP_WARN_ACTION */ + +/* NEED_ETH_HW_ADDR_SET + * + * eth_hw_addr_set was added by upstream commit + * 48eab831ae8b ("net: create netdev->dev_addr assignment helpers") + * + * Using
eth_hw_addr_set became required in 5.17, when the dev_addr field in + * the netdev struct was constified. See 48eab831ae8b ("net: create + * netdev->dev_addr assignment helpers") + */ +#ifdef NEED_ETH_HW_ADDR_SET +static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) +{ + ether_addr_copy(dev->dev_addr, addr); +} +#endif /* NEED_ETH_HW_ADDR_SET */ + +#ifdef NEED_JIFFIES_64_TIME_IS_MACROS +/* NEED_JIFFIES_64_TIME_IS_MACROS + * + * The jiffies64 time_is_* macros were introduced upstream by 3740dcdf8a77 + * ("jiffies: add time comparison functions for 64 bit jiffies") in Linux 4.9. + * + * Support for 64-bit jiffies has been available since the initial import of + * Linux into git in 2005, so its safe to just implement the macros as-is + * here. + */ +#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a) +#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a) +#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a) +#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a) +#endif /* NEED_JIFFIES_64_TIME_IS_MACROS */ + +#ifdef NEED_INDIRECT_CALL_WRAPPER_MACROS +/* NEED_INDIRECT_CALL_WRAPPER_MACROS + * + * The INDIRECT_CALL_* macros were introduced upstream as upstream commit + * 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up indirect calls + * of builtin") which landed in Linux 5.0 + * + * These are easy to implement directly. + */ +#ifdef CONFIG_RETPOLINE +#define INDIRECT_CALL_1(f, f1, ...) \ + ({ \ + likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \ + }) +#define INDIRECT_CALL_2(f, f2, f1, ...) \ + ({ \ + likely(f == f2) ? f2(__VA_ARGS__) : \ + INDIRECT_CALL_1(f, f1, __VA_ARGS__); \ + }) + +#define INDIRECT_CALLABLE_DECLARE(f) f +#define INDIRECT_CALLABLE_SCOPE +#else /* !CONFIG_RETPOLINE */ +#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALLABLE_DECLARE(f) +#define INDIRECT_CALLABLE_SCOPE static +#endif /* CONFIG_RETPOLINE */ +#endif /* NEED_INDIRECT_CALL_WRAPPER_MACROS */ + +#ifdef NEED_INDIRECT_CALL_3_AND_4 +/* NEED_INDIRECT_CALL_3_AND_4 + * Support for the 3 and 4 call variants was added in upstream commit + * e678e9ddea96 ("indirect_call_wrapper: extend indirect wrapper to support up + * to 4 calls") + * + * These are easy to implement directly. + */ + +#ifdef CONFIG_RETPOLINE +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \ + ({ \ + likely(f == f3) ? f3(__VA_ARGS__) : \ + INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \ + }) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \ + ({ \ + likely(f == f4) ? f4(__VA_ARGS__) : \ + INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \ + }) +#else /* !CONFIG_RETPOLINE */ +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__) +#endif /* CONFIG_RETPOLINE */ +#endif /* NEED_INDIRECT_CALL_3_AND_4 */ + +#ifdef NEED_EXPORT_INDIRECT_CALLABLE +/* NEED_EXPORT_INDIRECT_CALLABLE + * + * Support for EXPORT_INDIRECT_CALLABLE was added in upstream commit + * 0053859496ba ("net: add EXPORT_INDIRECT_CALLABLE wrapper") + * + * These are easy to implement directly. 
+ */ +#ifdef CONFIG_RETPOLINE +#define EXPORT_INDIRECT_CALLABLE(f) EXPORT_SYMBOL(f) +#else +#define EXPORT_INDIRECT_CALLABLE(f) +#endif /* CONFIG_RETPOLINE */ +#endif /* NEED_EXPORT_INDIRECT_CALLABLE */ + +#ifdef NEED_DEVM_KASPRINTF +/* NEED_DEVM_KASPRINTF + * + * devm_kvasprintf and devm_kasprintf were added by commit + * 75f2a4ead5d5 ("devres: Add devm_kasprintf and devm_kvasprintf API") + * in Linux 3.17. + */ +__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, + const char *fmt, va_list ap); +__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp, + const char *fmt, ...); +#endif /* NEED_DEVM_KASPRINTF */ + +#ifdef NEED_XSK_UMEM_GET_RX_FRAME_SIZE +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef xsk_umem_get_rx_frame_size +static inline u32 _xsk_umem_get_rx_frame_size(struct xdp_umem *umem) +{ + return umem->chunk_size_nohr - XDP_PACKET_HEADROOM; +} + +#define xsk_umem_get_rx_frame_size _xsk_umem_get_rx_frame_size +#endif /* xsk_umem_get_rx_frame_size */ +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif + +#ifdef NEED_XSK_BUFF_DMA_SYNC_FOR_CPU +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include +static inline void +_kc_xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, + void __always_unused *pool) +{ + xsk_buff_dma_sync_for_cpu(xdp); +} + +#define xsk_buff_dma_sync_for_cpu(xdp, pool) \ + _kc_xsk_buff_dma_sync_for_cpu(xdp, pool) +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ +#endif /* NEED_XSK_BUFF_DMA_SYNC_FOR_CPU */ + +#ifdef NEED_XSK_BUFF_POOL_RENAME +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_get_pool_from_qid xdp_get_umem_from_qid +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#endif /* NEED_XSK_BUFF_POOL_RENAME */ + +#ifdef NEED_PCI_IOV_VF_ID +/* NEED_PCI_IOV_VF_ID + * + * pci_iov_vf_id were added by commit 21ca9fb62d468 ("PCI/IOV: + * Add pci_iov_vf_id() to get VF index") in Linux 5.18 + */ +int _kc_pci_iov_vf_id(struct pci_dev *dev); +#define pci_iov_vf_id _kc_pci_iov_vf_id +#endif /* NEED_PCI_IOV_VF_ID */ + +/* NEED_MUL_U64_U64_DIV_U64 + * + * mul_u64_u64_div_u64 was introduced in Linux 5.9 as part of commit + * 3dc167ba5729 ("sched/cputime: Improve cputime_adjust()") + */ +#ifdef NEED_MUL_U64_U64_DIV_U64 +u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); +#endif /* NEED_MUL_U64_U64_DIV_U64 */ + +#ifndef HAVE_LINKMODE +static inline void linkmode_set_bit(int nr, volatile unsigned long *addr) +{ + __set_bit(nr, addr); +} + +static inline void linkmode_zero(unsigned long *dst) +{ + bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); +} +#endif /* !HAVE_LINKMODE */ + +#ifndef ETHTOOL_GLINKSETTINGS +/* Link mode bit indices */ +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + 
ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + + /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit + * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* + * macro for bits > 31. The only way to use indices > 31 is to + * use the new ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API. + */ + + __ETHTOOL_LINK_MODE_LAST + = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, +}; +#endif /* !ETHTOOL_GLINKSETTINGS */ + +#if defined(NEED_FLOW_MATCH) && defined(HAVE_TC_SETUP_CLSFLOWER) +/* NEED_FLOW_MATCH + * + * flow_match*, FLOW_DISSECTOR_MATCH, flow_rule*, flow_rule_match_key, and + * tc_cls_flower_offload_flow_rule were added by commit + * 8f2566225ae2 ("flow_offload: add flow_rule and flow_match structures and use + * them") in Linux 5.1. + */ + +#include + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; +#endif /* HAVE_TC_FLOWER_VLAN_IN_TAGS */ + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP +struct flow_match_ip { + struct flow_dissector_key_ip *key, *mask; +}; +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +#ifdef HAVE_TC_FLOWER_ENC +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; +}; +#endif /* HAVE_TC_FLOWER_ENC */ + +struct flow_rule { + struct flow_match match; +}; + +static inline struct flow_rule * +tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) +{ + return (struct flow_rule *)&tc_flow_cmd->dissector; +} + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} + +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ + +static inline void +flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +static inline void +flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control 
*out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +static inline void +flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +static inline void +flow_rule_match_vlan(const struct flow_rule *rule, struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} +#endif /* HAVE_TC_FLOWER_VLAN_IN_TAGS */ + +static inline void +flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +static inline void +flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP +static inline void +flow_rule_match_ip(const struct flow_rule *rule, struct flow_match_ip *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out); +} +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ + +static inline void +flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +static inline void +flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +static inline void +flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +static inline void +flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} + +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP +#ifdef HAVE_FLOW_DISSECTOR_KEY_ENC_IP +static inline void +flow_rule_match_enc_ip(const struct flow_rule *rule, struct flow_match_ip *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out); +} +#endif /* HAVE_FLOW_DISSECTOR_KEY_ENC_IP */ +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ + +static inline void +flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +static inline void +flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} +#endif /* HAVE_TC_FLOWER_ENC */ +#endif /* NEED_FLOW_MATCH && HAVE_TC_SETUP_CLSFLOWER */ + +#ifndef HAVE_INCLUDE_BITFIELD +/* linux/bitfield.h has been added in Linux 4.9 in upstream commit + * 3e9b3112ec74 ("add basic register-field manipulation macros") + */ +#define __bf_shf(x) (__builtin_ffsll(x) - 1) + +#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \ + ({ \ + BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \ + _pfx "mask is not constant"); \ + BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \ + BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? 
\ + ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \ + _pfx "value too large for the field"); \ + BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \ + _pfx "type of reg too small for mask"); \ + __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \ + (1ULL << __bf_shf(_mask))); \ + }) + +/** + * FIELD_MAX() - produce the maximum value representable by a field + * @_mask: shifted mask defining the field's length and position + * + * FIELD_MAX() returns the maximum value that can be held in the field + * specified by @_mask. + */ +#define FIELD_MAX(_mask) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: "); \ + (typeof(_mask))((_mask) >> __bf_shf(_mask)); \ + }) + +/** + * FIELD_FIT() - check if value fits in the field + * @_mask: shifted mask defining the field's length and position + * @_val: value to test against the field + * + * Return: true if @_val can fit inside @_mask, false if @_val is too big. + */ +#define FIELD_FIT(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ + !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ + }) + +/** + * FIELD_PREP() - prepare a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_val: value to put in the field + * + * FIELD_PREP() masks and shifts up the value. The result should + * be combined with other fields of the bitfield using logical OR. + */ +#define FIELD_PREP(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \ + ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \ + }) + +/** + * FIELD_GET() - extract a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_reg: value of entire bitfield + * + * FIELD_GET() extracts the field specified by @_mask from the + * bitfield passed in as @_reg by masking and shifting it down. + */ +#define FIELD_GET(_mask, _reg) \ + ({ \ + __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \ + (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ + }) +#endif /* HAVE_INCLUDE_BITFIELD */ + +#ifdef NEED_BUILD_BUG_ON +/* Force a compilation error if a constant expression is not a power of 2 */ +#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON(((n) & ((n) - 1)) != 0) + +/** + * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied + * error message. + * @condition: the condition which the compiler should know is false. + * + * See BUILD_BUG_ON for description. + */ +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) + +/** + * BUILD_BUG_ON - break compile if a condition is true. + * @condition: the condition which the compiler should know is false. + * + * If you have some code which relies on certain constants being equal, or + * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to + * detect if someone changes it. + */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* NEED_BUILD_BUG_ON */ + +#ifdef NEED_IN_TASK +#define in_hardirq() (hardirq_count()) +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) +#define in_task() (!(in_nmi() | in_hardirq() | \ + in_serving_softirq())) +#endif /* NEED_IN_TASK */ + #endif /* _KCOMPAT_IMPL_H_ */ diff --git a/drivers/thirdparty/ice/kcompat_kthread.h b/drivers/thirdparty/ice/kcompat_kthread.h new file mode 100644 index 000000000000..ea8ae2158eb0 --- /dev/null +++ b/drivers/thirdparty/ice/kcompat_kthread.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _KCOMPAT_KTHREAD_H_ +#define _KCOMPAT_KTHREAD_H_ + +/* Kernels since 4.9 have supported delayed work items for kthreads. In order + * to allow seamless transition from old to new kernels, this header defines + * a set of macros to switch out kthread usage with a work queue on the older + * kernels that do not have support for kthread_delayed_work. + */ +#ifdef HAVE_KTHREAD_DELAYED_API +#include <linux/kthread.h> +#else /* HAVE_KTHREAD_DELAYED_API */ +#include <linux/workqueue.h> +#undef kthread_work +#define kthread_work work_struct +#undef kthread_delayed_work +#define kthread_delayed_work delayed_work +#undef kthread_worker +#define kthread_worker workqueue_struct +#undef kthread_queue_work +#define kthread_queue_work(worker, work) queue_work(worker, work) +#undef kthread_queue_delayed_work +#define kthread_queue_delayed_work(worker, dwork, delay) \ + queue_delayed_work(worker, dwork, delay) +#undef kthread_init_work +#define kthread_init_work(work, fn) INIT_WORK(work, fn) +#undef kthread_init_delayed_work +#define kthread_init_delayed_work(dwork, fn) \ + INIT_DELAYED_WORK(dwork, fn) +#undef kthread_flush_worker +#define kthread_flush_worker(worker) flush_workqueue(worker) +#undef kthread_cancel_work_sync +#define kthread_cancel_work_sync(work) cancel_work_sync(work) +#undef kthread_cancel_delayed_work_sync +#define kthread_cancel_delayed_work_sync(dwork) \ + cancel_delayed_work_sync(dwork) +#undef kthread_create_worker +#define kthread_create_worker(flags, namefmt, ...) \ + alloc_workqueue(namefmt, 0, 0, ##__VA_ARGS__) +#undef kthread_destroy_worker +#define kthread_destroy_worker(worker) destroy_workqueue(worker) +#endif /* !HAVE_KTHREAD_DELAYED_API */ + +#endif /* _KCOMPAT_KTHREAD_H_ */ diff --git a/drivers/thirdparty/ice/kcompat_net_dim.c b/drivers/thirdparty/ice/kcompat_net_dim.c index 244c87634aba..7604c8ef2090 100644 --- a/drivers/thirdparty/ice/kcompat_net_dim.c +++ b/drivers/thirdparty/ice/kcompat_net_dim.c @@ -22,35 +22,35 @@ #define NET_DIM_DEF_PROFILE_EQE 1 #define NET_DIM_RX_EQE_PROFILES { \ - {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE, 0, 0}, \ + {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE, 0, 0}, \ + {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE, 0, 0}, \ + {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE, 0, 0}, \ + {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE, 0, 0}, \ } #define NET_DIM_RX_CQE_PROFILES { \ - {2, 256}, \ - {8, 128}, \ - {16, 64}, \ - {32, 64}, \ - {64, 64} \ + {2, 256, 0, 0}, \ + {8, 128, 0, 0}, \ + {16, 64, 0, 0}, \ + {32, 64, 0, 0}, \ + {64, 64, 0, 0} \ } #define NET_DIM_TX_EQE_PROFILES { \ - {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \ + {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE, 0, 0}, \ + {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE, 0, 0}, \ + {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE, 0, 0}, \ + {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE, 0, 0}, \ + {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE, 0, 0} \ } #define NET_DIM_TX_CQE_PROFILES
{ \ - {5, 128}, \ - {8, 64}, \ - {16, 32}, \ - {32, 32}, \ - {64, 32} \ + {5, 128, 0, 0}, \ + {8, 64, 0, 0}, \ + {16, 32, 0, 0}, \ + {32, 32, 0, 0}, \ + {64, 32, 0, 0} \ } static const struct dim_cq_moder @@ -239,7 +239,7 @@ void net_dim(struct dim *dim, struct dim_sample end_sample) schedule_work(&dim->work); break; } - /* fall through */ + fallthrough; case DIM_START_MEASURE: dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr, &dim->start_sample); diff --git a/drivers/thirdparty/ice/kcompat_oracle_defs.h b/drivers/thirdparty/ice/kcompat_oracle_defs.h new file mode 100644 index 000000000000..d64aed160db6 --- /dev/null +++ b/drivers/thirdparty/ice/kcompat_oracle_defs.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _KCOMPAT_ORACLE_DEFS_H_ +#define _KCOMPAT_ORACLE_DEFS_H_ + +/* UEK kernel versions are a combination of the LINUX_VERSION_CODE along with + * an extra 3 digits. This values are part of BUILD_KERNEL string, and first + * number extracted by common.mk and placed into UEK_RELEASE_NUMBER. + * + * We combine the value of UEK_RELEASE_NUMBER along with the LINUX_VERSION code + * to generate the useful value that determines what specific kernel we're + * dealing with. + * + * Just in case the UEK_RELEASE_NUMBER ever goes above 255, we reserve 16 bits + * instead of 8 for this value. + */ +#if !defined(UEK_RELEASE_NUMBER) +#error "UEK_RELEASE_NUMBER is undefined" +#endif + +#if !defined(UEK_MINOR_RELEASE_NUMBER) +#error "UEK_MINOR_RELEASE_NUMBER is undefined" +#endif + +#if UEK_RELEASE_NUMBER > 65535 +#error "UEK_RELEASE_NUMBER is unexpectedly large" +#endif + +#define UEK_KERNEL_CODE ((LINUX_VERSION_CODE << 16) + UEK_RELEASE_NUMBER) +#define UEK_KERNEL_VERSION(a, b, c, d) ((KERNEL_VERSION(a, b, c) << 16) + (d)) + +#if UEK_KERNEL_VERSION(5, 4, 17, 2136) > UEK_KERNEL_CODE +#define NEED_ORCL_LIN_PCI_AER_CLEAR_NONFATAL_STATUS +#endif + +#if UEK_KERNEL_VERSION(5, 4, 17, 2136) == UEK_KERNEL_CODE +#if UEK_MINOR_RELEASE_NUMBER < 301 +#undef NEED_NET_PREFETCH +#endif +#endif + +#endif /* _KCOMPAT_ORACLE_DEFS_H_ */ diff --git a/drivers/thirdparty/ice/kcompat_pldmfw.c b/drivers/thirdparty/ice/kcompat_pldmfw.c index 181700796b4e..d4605d8af38b 100644 --- a/drivers/thirdparty/ice/kcompat_pldmfw.c +++ b/drivers/thirdparty/ice/kcompat_pldmfw.c @@ -163,15 +163,27 @@ struct __pldmfw_component_area { #define pldm_first_desc_tlv(start) \ ((const struct __pldmfw_desc_tlv *)(start)) +/** + * pldm_desc_tlv_member + * @desc: pointer to a descriptor TLV + * @member: name of member to give pointer to + * + * Yields pointer to @member within, possibly unaligned, @desc + */ +#define pldm_desc_tlv_member(desc, member) \ + ((const u8 *)(desc) + offsetof(struct __pldmfw_desc_tlv, member)) + /** * pldm_next_desc_tlv * @desc: pointer to a descriptor TLV * * Finds the pointer to the next descriptor following a given descriptor */ -#define pldm_next_desc_tlv(desc) \ - ((const struct __pldmfw_desc_tlv *)((desc)->data + \ - get_unaligned_le16(&(desc)->size))) +#define pldm_next_desc_tlv(desc) \ + ({ const void *desc_ = (desc); \ + ((const struct __pldmfw_desc_tlv *)( \ + pldm_desc_tlv_member(desc_, data) \ + + get_unaligned_le16(pldm_desc_tlv_member(desc_, size)))); }) /** * pldm_for_each_desc_tlv @@ -517,10 +529,10 @@ pldm_parse_desc_tlvs(struct pldmfw_priv *data, struct pldmfw_record *record, u8 if (err) return err; - type = get_unaligned_le16(&__desc->type); + type = get_unaligned_le16(pldm_desc_tlv_member(__desc, 
type)); /* According to DSP0267, this only includes the data field */ - size = get_unaligned_le16(&__desc->size); + size = get_unaligned_le16(pldm_desc_tlv_member(__desc, size)); err = pldm_check_desc_tlv_len(data, type, size); if (err) @@ -537,7 +549,7 @@ pldm_parse_desc_tlvs(struct pldmfw_priv *data, struct pldmfw_record *record, u8 desc->type = type; desc->size = size; - desc->data = __desc->data; + desc->data = pldm_desc_tlv_member(__desc, data); list_add_tail(&desc->entry, &record->descs); } diff --git a/drivers/thirdparty/ice/kcompat_rhel_defs.h b/drivers/thirdparty/ice/kcompat_rhel_defs.h index e5ddbe16e6a0..c620e11fb62c 100644 --- a/drivers/thirdparty/ice/kcompat_rhel_defs.h +++ b/drivers/thirdparty/ice/kcompat_rhel_defs.h @@ -26,19 +26,33 @@ /*****************************************************************************/ #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3)) +#define NEED_NETDEV_TXQ_BQL_PREFETCH #else /* >= 7.3 */ #undef NEED_DEV_PRINTK_ONCE +#undef NEED_DEVM_KASPRINTF #endif /* 7.3 */ +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4)) +#define NEED_BUILD_BUG_ON +#else /* >= 7.4 */ +#define HAVE_RHEL7_EXTENDED_OFFLOAD_STATS +#define HAVE_INCLUDE_BITFIELD +#endif /* 7.4 */ + /*****************************************************************************/ #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) #else /* >= 7.5 */ #define HAVE_TCF_EXTS_TO_LIST +#define HAVE_FLOW_DISSECTOR_KEY_IP #endif /* 7.5 */ /*****************************************************************************/ #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)) +#undef HAVE_XDP_BUFF_RXQ +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS #else /* >= 7.6 */ +#undef NEED_JIFFIES_64_TIME_IS_MACROS #undef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 #undef NEED_TC_SETUP_QDISC_MQPRIO #endif /* 7.6 */ @@ -47,32 +61,86 @@ #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7)) #else /* >= 7.7 */ #define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +#define HAVE_ETHTOOL_NEW_100G_BITS +#undef NEED_NETDEV_TX_SENT_QUEUE +#undef NEED_IN_TASK +#define HAVE_FLOW_DISSECTOR_KEY_ENC_IP #endif /* 7.7 */ +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,8)) +#else /* >= 7.8 */ +#endif /* 7.8 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,9)) +#else /* >= 7.9 */ +/* mul_u64_u64_div_u64 was backported into RHEL 7.9 but not into the early + * 8.x releases + */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +#undef NEED_MUL_U64_U64_DIV_U64 +#endif /* < 8.0 */ +#endif /* 7.9 */ + /*****************************************************************************/ #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) #else /* >= 8.0 */ #undef HAVE_TCF_EXTS_TO_LIST +#undef HAVE_ETHTOOL_NEW_100G_BITS +#define HAVE_NDO_OFFLOAD_STATS +#undef HAVE_RHEL7_EXTENDED_OFFLOAD_STATS #define HAVE_TCF_EXTS_FOR_EACH_ACTION -#endif /* 7.5 */ +/* 7.7 undefs it due to a backport in 7.7+, but 8.0 needs it still */ +#define NEED_NETDEV_TX_SENT_QUEUE +#define HAVE_DEVLINK_REGIONS +#define HAVE_DEVLINK_PARAMS +#endif /* 8.0 */ /*****************************************************************************/ #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,1)) #define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#define NEED_FLOW_MATCH #else /* >= 8.1 */ +#define HAVE_ETHTOOL_NEW_100G_BITS #undef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#undef 
NEED_FLOW_MATCH +#define HAVE_DEVLINK_PARAMS_PUBLISH +#undef NEED_NETDEV_TX_SENT_QUEUE +#undef NEED_INDIRECT_CALL_WRAPPER_MACROS +#define HAVE_INDIRECT_CALL_WRAPPER_HEADER +#define HAVE_GRETAP_TYPE +#define HAVE_VXLAN_TYPE +#define HAVE_LINKMODE +#define HAVE_FLOW_DISSECTOR_KEY_CVLAN #endif /* 8.1 */ /*****************************************************************************/ #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2)) #else /* >= 8.2 */ +#undef NEED_BUS_FIND_DEVICE_CONST_DATA #undef NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY #undef NEED_SKB_FRAG_OFF #undef NEED_SKB_FRAG_OFF_ADD #undef NEED_FLOW_INDR_BLOCK_CB_REGISTER +#define HAVE_FLOW_INDR_BLOCK_LOCK #define HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID +#define HAVE_DEVLINK_HEALTH +#define HAVE_NETDEV_SB_DEV #endif /* 8.2 */ +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,3)) +#else /* >= 8.3 */ +#undef NEED_CPU_LATENCY_QOS_RENAME +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#undef NEED_DEVLINK_REGION_CREATE_OPS +#undef NEED_MUL_U64_U64_DIV_U64 +#endif /* 8.3 */ + /*****************************************************************************/ #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,4)) #else /* >= 8.4 */ @@ -80,6 +148,69 @@ #undef NEED_NET_PREFETCH #undef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY #undef HAVE_XDP_QUERY_PROG +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NDO_XSK_WAKEUP +#define XSK_UMEM_RETURNS_XDP_DESC +#undef NEED_XSK_UMEM_GET_RX_FRAME_SIZE +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT #endif /* 8.4 */ +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,5)) +#else /* >= 8.5 */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#undef HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +#undef HAVE_NAPI_BUSY_LOOP +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#undef NEED_XSK_BUFF_DMA_SYNC_FOR_CPU +#define NO_XDP_QUERY_XSK_UMEM +#undef NEED_XSK_BUFF_POOL_RENAME +#define HAVE_NETDEV_BPF_XSK_POOL +#define HAVE_AF_XDP_NETDEV_UMEM +#endif /* 8.5 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,6)) +#else /* >= 8.6 */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif /* < 9.0 */ +#undef NEED_ETH_HW_ADDR_SET +#endif /* 8.6 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,7)) +#else /* >= 8.7 */ +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#undef NEED_DEVLINK_ALLOC_SETS_DEV +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#endif /* 8.7 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)) +#else /* >= 9.0 */ +#define HAVE_XDP_BUFF_RXQ +#define HAVE_NDO_ETH_IOCTL +#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#undef HAVE_DEVLINK_PARAMS_PUBLISH +#undef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#undef HAVE_DEVLINK_REGISTER_SETS_DEV +#undef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_DEVLINK_NOTIFY_REGISTER +#define HAVE_DEVLINK_SET_FEATURES +#endif /* 9.0 */ + 
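As an aside on how these distro flags are consumed: HAVE_ETHTOOL_COALESCE_EXTACK (set for RHEL 8.6 above) selects between the two upstream prototypes of the ethtool coalesce callbacks; the four-argument form taking struct kernel_ethtool_coalesce and struct netlink_ext_ack arrived in kernel 5.15. A minimal sketch, assuming an illustrative callback named example_get_coalesce:

#ifdef HAVE_ETHTOOL_COALESCE_EXTACK
static int example_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kec,
				struct netlink_ext_ack *extack)
#else
static int example_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
#endif
{
	/* shared body; the extra parameters are simply unused on older kernels */
	ec->rx_coalesce_usecs = 50;
	return 0;
}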
+/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,1)) +#else /* >= 9.1 */ +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_XDP_DO_FLUSH +#endif /* 9.1 */ + #endif /* _KCOMPAT_RHEL_DEFS_H_ */ diff --git a/drivers/thirdparty/ice/kcompat_sles_defs.h b/drivers/thirdparty/ice/kcompat_sles_defs.h index 5ee6563993d5..adfb5c5f191a 100644 --- a/drivers/thirdparty/ice/kcompat_sles_defs.h +++ b/drivers/thirdparty/ice/kcompat_sles_defs.h @@ -92,6 +92,13 @@ #warning "SLE kernel versions between 4.12.14-23 and 4.12.14-94 are not supported" #endif +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,10)) +#else /* >= 4.12.14-10 */ +#undef NEED_INDIRECT_CALL_WRAPPER_MACROS +#define HAVE_INDIRECT_CALL_WRAPPER_HEADER +#endif /* 4.12.14-10 */ + /*****************************************************************************/ #if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,100)) #else /* >= 4.12.14-100 */ @@ -111,8 +118,9 @@ #endif /* 4.12.14-111 */ /*****************************************************************************/ -#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,120)) -#else /* >= 4.12.14-120 */ +/* SLES 12-SP5 base kernel version */ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,115)) +#else /* >= 4.12.14-115 */ #define HAVE_NDO_SELECT_QUEUE_SB_DEV #define HAVE_TCF_MIRRED_DEV #define HAVE_TCF_BLOCK @@ -120,21 +128,53 @@ #define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK #undef NEED_TC_SETUP_QDISC_MQPRIO #undef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 -#endif /* 4.12.14-120 */ +#undef NEED_NETDEV_TX_SENT_QUEUE +#define HAVE_LINKMODE +#endif /* 4.12.14-115 */ + +/*****************************************************************************/ +/* SLES 15-SP1 base */ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,195)) +#else /* >= 4.12.14-195 */ +#define HAVE_DEVLINK_PARAMS +#undef NEED_NETDEV_TX_SENT_QUEUE +#endif /* 4.12.14-195 */ /*****************************************************************************/ #if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,8,2)) #else /* >= 5.3.8-2 */ +#undef NEED_BUS_FIND_DEVICE_CONST_DATA #undef NEED_FLOW_INDR_BLOCK_CB_REGISTER #undef NEED_SKB_FRAG_OFF #undef NEED_SKB_FRAG_OFF_ADD +#define HAVE_FLOW_INDR_BLOCK_LOCK +#define HAVE_DEVLINK_PARAMS_PUBLISH #endif /* 5.3.8-2 */ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,16,2)) +#else /* >= 5.3.16-2 */ +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#endif /* 5.3.16-2 */ + +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,24)) +#else /* >= 5.3.18-24 */ +#undef NEED_MUL_U64_U64_DIV_U64 +#endif + +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,26)) +#else /* >= 5.3.18-26 */ +#undef NEED_CPU_LATENCY_QOS_RENAME +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#endif + /*****************************************************************************/ #if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,34)) #else /* >= 5.3.18-34 */ #undef NEED_DEVLINK_REGION_CREATE_OPS #undef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER #endif /* 5.3.18-34 */ /*****************************************************************************/ @@ -149,4 +189,38 @@ #undef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY #endif /* 5.3.18-38 */ 
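A usage sketch for the cpu_latency_qos_*() wrappers whose NEED_CPU_LATENCY_QOS_RENAME flag is cleared above for SLES 5.3.18-26 and later; the request variable and helper names are illustrative only:

static struct pm_qos_request example_latency_req;

static void example_latency_start(void)
{
	/* cap CPU wakeup latency at 10 us; on kernels without the rename this
	 * forwards to pm_qos_add_request() with PM_QOS_CPU_DMA_LATENCY
	 */
	cpu_latency_qos_add_request(&example_latency_req, 10);
}

static void example_latency_stop(void)
{
	cpu_latency_qos_remove_request(&example_latency_req);
}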
+/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,41)) +#define NEED_XSK_BUFF_POOL_RENAME +#else /* >= 5.3.18-41 */ +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_NETDEV_BPF_XSK_POOL +#undef NEED_XSK_UMEM_GET_RX_FRAME_SIZE +#undef NEED_XSK_BUFF_POOL_RENAME +#undef NEED_XSK_BUFF_DMA_SYNC_FOR_CPU +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* 5.3.18-41 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,59)) +#else /* >= 5.3.18-59 */ +#undef NEED_ETH_HW_ADDR_SET +#endif /* 5.3.18-59 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 14, 17, 1)) +#else /* >= 5.14.17-150400.1 */ + #undef HAVE_DEVLINK_PARAMS_PUBLISH + #undef HAVE_DEVLINK_REGISTER_SETS_DEV + #define HAVE_DEVLINK_SET_FEATURES +#endif /* 5.14.17-150400.1 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,14,21,9)) +#else /* >= 5.14.21-150400.9 */ +#undef NEED_DEVLINK_ALLOC_SETS_DEV +#undef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif /* 5.14.21-150400.9 */ + #endif /* _KCOMPAT_SLES_DEFS_H_ */ diff --git a/drivers/thirdparty/ice/kcompat_std_defs.h b/drivers/thirdparty/ice/kcompat_std_defs.h index 1d657809ff9e..c426f1be4b54 100644 --- a/drivers/thirdparty/ice/kcompat_std_defs.h +++ b/drivers/thirdparty/ice/kcompat_std_defs.h @@ -31,29 +31,82 @@ #error "KERNEL_VERSION is undefined" #endif +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) +#define NEED_DEVM_KASPRINTF +#else /* >= 3,17,0 */ +#endif /* 3,17,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)) +#define NEED_DEV_PM_DOMAIN_ATTACH_DETACH +#else /* >= 3,18,0 */ +#endif /* 3,18,0 */ + /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)) #define NEED_DEV_PRINTK_ONCE #else /* >= 3,19,0 */ #endif /* 3,19,0 */ +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#define NEED_DEFINE_STATIC_KEY_FALSE +#define NEED_STATIC_BRANCH +#else /* >= 4,3,0 */ +#define NEED_DECLARE_STATIC_KEY_FALSE +#endif /* 4,3,0 */ + /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) #else /* >= 4,8,0 */ #define HAVE_TCF_EXTS_TO_LIST +#define HAVE_PCI_ALLOC_IRQ #endif /* 4,8,0 */ +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#define NEED_JIFFIES_64_TIME_IS_MACROS +#else /* >= 4,9,0 */ +#define HAVE_KTHREAD_DELAYED_API +#define HAVE_NDO_OFFLOAD_STATS +#undef NEED_DECLARE_STATIC_KEY_FALSE +#define HAVE_INCLUDE_BITFIELD +#endif /* 4,9,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,62)) +#define NEED_IN_TASK +#else /* >= 4,9,62 */ +#endif /* 4,9,62 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#else /* >= 4,12,0 */ +#define HAVE_NAPI_BUSY_LOOP +#endif /* 4,12,0 */ + 
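A short sketch of the time_is_*_jiffies64() backport that NEED_JIFFIES_64_TIME_IS_MACROS (set above for kernels before 4.9) enables; the deadline variable and helpers are illustrative only:

static u64 example_deadline;

static void example_arm_deadline(void)
{
	/* arm a five second deadline using 64-bit jiffies */
	example_deadline = get_jiffies_64() + 5 * HZ;
}

static bool example_deadline_expired(void)
{
	/* time_is_before_jiffies64(a) expands to time_after64(get_jiffies_64(), a),
	 * so it reads true once the deadline has passed
	 */
	return time_is_before_jiffies64(example_deadline);
}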
+/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#else /* >= 4,13,0 */ +#define HAVE_FLOW_DISSECTOR_KEY_IP +#endif /* 4,13,0 */ + /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) #define NEED_TC_SETUP_QDISC_MQPRIO +#define NEED_NETDEV_XDP_STRUCT #else /* >= 4,15,0 */ #define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_NDO_BPF #endif /* 4,15,0 */ /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) #define NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 #else /* >= 4,16,0 */ +#define HAVE_XDP_BUFF_RXQ +#define HAVE_XDP_RXQ_INFO_REG_3_PARAMS #endif /* 4,16,0 */ /*****************************************************************************/ @@ -77,51 +130,214 @@ #else /* >= 4,19,0 */ #undef HAVE_TCF_EXTS_TO_LIST #define HAVE_TCF_EXTS_FOR_EACH_ACTION +#define HAVE_DEVLINK_REGIONS +#define HAVE_TC_ETF_QOPT_OFFLOAD +#define HAVE_DEVLINK_PARAMS +#define HAVE_FLOW_DISSECTOR_KEY_ENC_IP #endif /* 4,19,0 */ +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#define NEED_NETDEV_TX_SENT_QUEUE +#else /* >= 4.20.0 */ +#define HAVE_VXLAN_TYPE +#define HAVE_LINKMODE +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#define NEED_INDIRECT_CALL_WRAPPER_MACROS +#else /* >= 5.0.0 */ +#define HAVE_GRETAP_TYPE +#define HAVE_INDIRECT_CALL_WRAPPER_HEADER +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#define NEED_FLOW_MATCH +#else /* >= 5.1.0 */ +#define HAVE_ETHTOOL_200G_BITS +#define HAVE_ETHTOOL_NEW_100G_BITS +#define HAVE_DEVLINK_PARAMS_PUBLISH +#define HAVE_DEVLINK_HEALTH +#endif /* 5.1.0 */ + /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) #else /* >= 5.2.0 */ #define HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID +#define HAVE_FLOW_DISSECTOR_KEY_CVLAN #endif /* 5.2.0 */ /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) #define NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY +#define NEED_BUS_FIND_DEVICE_CONST_DATA #else /* >= 5.3.0 */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5,3,10)) +#define HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#endif /* 5.3.10 */ #endif /* 5.3.0 */ -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) -#define NEED_DEVLINK_REGION_CREATE_OPS -#else /* >= 5.7.0 */ -#endif /* 5.7.0 */ - /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) #define NEED_SKB_FRAG_OFF_ADD #define NEED_SKB_FRAG_OFF +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,14,241) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#undef NEED_SKB_FRAG_OFF +#endif /* > 4.14.241 && < 4.15.0 */ #if (LINUX_VERSION_CODE > KERNEL_VERSION(4,19,200) && \ LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) #undef NEED_SKB_FRAG_OFF -#endif /* 4.19.X for X > 201 */ +#endif /* > 4.19.200 && < 4.20.0 */ #define NEED_FLOW_INDR_BLOCK_CB_REGISTER #else /* >= 5.4.0 */ +#define HAVE_FLOW_INDR_BLOCK_LOCK +#define 
HAVE_XSK_UNALIGNED_CHUNK_PLACEMENT #endif /* 5.4.0 */ +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,5,0)) +#else /* >= 5.5.0 */ +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#endif /* 5.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +#define NEED_DEVLINK_REGION_CREATE_OPS +#define NEED_CPU_LATENCY_QOS_RENAME +#else /* >= 5.7.0 */ +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#define NEED_XSK_UMEM_GET_RX_FRAME_SIZE +#else /* >= 5.8.0 */ +#undef HAVE_XSK_UNALIGNED_CHUNK_PLACEMENT +#endif /* 5.8.0 */ + /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) #define NEED_DEVLINK_PORT_ATTRS_SET_STRUCT #define HAVE_XDP_QUERY_PROG +#define NEED_INDIRECT_CALL_3_AND_4 +#define NEED_MUL_U64_U64_DIV_U64 #else /* >= 5.9.0 */ +#define HAVE_TASKLET_SETUP #endif /* 5.9.0 */ /*****************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) #define NEED_NET_PREFETCH #define NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +#define NEED_XSK_BUFF_DMA_SYNC_FOR_CPU +#define NEED_XSK_BUFF_POOL_RENAME #else /* >= 5.10.0 */ +#define HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_UDP_TUNNEL_NIC_SHARED +#define HAVE_NETDEV_BPF_XSK_POOL #endif /* 5.10.0 */ +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) +#define HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +#else /* >= 5.11.0 */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#define HAVE_XSK_BATCHED_DESCRIPTOR_INTERFACES +#define HAVE_PASID_SUPPORT +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#define HAVE_XSK_TX_PEEK_RELEASE_DESC_BATCH_3_PARAMS +#endif /* 5.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,12,0)) +#define NEED_EXPORT_INDIRECT_CALLABLE +#else /* >= 5.12.0 */ +#endif /* 5.12.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,13,0)) +/* HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE + * + * create api changed as part of the commit c2ef2f50ad0c( vfio/mdev: Remove + * kobj from mdev_parent_ops->create()) + * + * if flag is defined use the old API else new API + */ +#define HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE +#define HAVE_DEV_IN_MDEV_API +#else /* >= 5.13.0 */ +#define HAVE_XPS_MAP_TYPE +#endif /* 5.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,14,0)) +#else /* >= 5.14.0 */ +#define HAVE_TTY_WRITE_ROOM_UINT +#endif /* 5.14.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)) +#define NEED_DEVLINK_ALLOC_SETS_DEV +#define HAVE_DEVLINK_REGISTER_SETS_DEV +#define NEED_ETH_HW_ADDR_SET +#else /* >= 5.15.0 */ +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_DEVICE_IN_MDEV_PARENT_OPS +#define HAVE_LMV1_SUPPORT +#define NEED_PCI_IOV_VF_ID +#endif /* 5.15.0 
*/ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,16,0)) +#else /* >= 5.16.0 */ +#undef HAVE_PASID_SUPPORT +#define HAVE_DEVLINK_SET_FEATURES +#define HAVE_DEVLINK_NOTIFY_REGISTER +#undef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#undef HAVE_DEVLINK_PARAMS_PUBLISH +#endif /* 5.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0)) +#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#else /* >=5.17.0*/ +#define HAVE_XDP_DO_FLUSH +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* 5.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)) +#else /* >=5.18.0*/ +#undef HAVE_LMV1_SUPPORT +#undef NEED_PCI_IOV_VF_ID +#define HAVE_GTP_SUPPORT +#undef HAVE_XSK_TX_PEEK_RELEASE_DESC_BATCH_3_PARAMS +#endif /* 5.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,19,0)) +#else /* >=5.19.0 */ +#define HAVE_NDO_FDB_DEL_EXTACK +#endif /* 5.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,0,0)) +#else /* >=6.0.0 */ +#define HAVE_FLOW_DISSECTOR_KEY_PPPOE +#endif /* 6.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,1,0)) +#else /* >=6.1.0 */ +#define HAVE_FLOW_DISSECTOR_KEY_L2TPV3 +#endif /* 6.1.0 */ + #endif /* _KCOMPAT_STD_DEFS_H_ */ diff --git a/drivers/thirdparty/ice/kcompat_ubuntu_defs.h b/drivers/thirdparty/ice/kcompat_ubuntu_defs.h index 9da611f64f21..030c76823ca2 100644 --- a/drivers/thirdparty/ice/kcompat_ubuntu_defs.h +++ b/drivers/thirdparty/ice/kcompat_ubuntu_defs.h @@ -25,4 +25,11 @@ #error "UBUNTU_VERSION is undefined" #endif +/*****************************************************************************/ +#if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,15,0,159) && \ + UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,15,0,999)) +#undef NEED_SKB_FRAG_OFF +#endif + +/*****************************************************************************/ #endif /* _KCOMPAT_UBUNTU_DEFS_H_ */ diff --git a/drivers/thirdparty/ice/siov_regs.h b/drivers/thirdparty/ice/siov_regs.h new file mode 100644 index 000000000000..19aa48c06b14 --- /dev/null +++ b/drivers/thirdparty/ice/siov_regs.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _SIOV_REGS_H_ +#define _SIOV_REGS_H_ +#define VDEV_MBX_START 0x20000 /* Begin at 128KB */ +#define VDEV_MBX_ATQBAL (VDEV_MBX_START + 0x0000) +#define VDEV_MBX_ATQBAH (VDEV_MBX_START + 0x0004) +#define VDEV_MBX_ATQLEN (VDEV_MBX_START + 0x0008) +#define VDEV_MBX_ATQH (VDEV_MBX_START + 0x000C) +#define VDEV_MBX_ATQT (VDEV_MBX_START + 0x0010) +#define VDEV_MBX_ARQBAL (VDEV_MBX_START + 0x0014) +#define VDEV_MBX_ARQBAH (VDEV_MBX_START + 0x0018) +#define VDEV_MBX_ARQLEN (VDEV_MBX_START + 0x001C) +#define VDEV_MBX_ARQH (VDEV_MBX_START + 0x0020) +#define VDEV_MBX_ARQT (VDEV_MBX_START + 0x0024) +#define VDEV_GET_RSTAT 0x21000 /* 132KB for RSTAT */ + +/* Begin at offset after 1MB (after 256 4k pages) */ +#define VDEV_QRX_TAIL_START 0x100000 +#define VDEV_QRX_TAIL(_i) (VDEV_QRX_TAIL_START + ((_i) * 0x1000)) /* 2k Rx queues */ + +#define VDEV_QRX_BUFQ_TAIL_START 0x900000 /* Begin at offset of 9MB for Rx buffer queue tail register pages */ +#define VDEV_QRX_BUFQ_TAIL(_i) (VDEV_QRX_BUFQ_TAIL_START + ((_i) * 0x1000)) /* 2k Rx buffer queues */ + +#define VDEV_QTX_TAIL_START 0x1100000 /* Begin at offset of 17MB for 2k Tx queues */ +#define VDEV_QTX_TAIL(_i) (VDEV_QTX_TAIL_START + ((_i) * 0x1000)) /* 2k Tx queues */ + +#define VDEV_QTX_COMPL_TAIL_START 0x1900000 /* Begin at offset of 25MB for 2k Tx completion queues */ +#define VDEV_QTX_COMPL_TAIL(_i) (VDEV_QTX_COMPL_TAIL_START + ((_i) * 0x1000)) /* 2k Tx completion queues */ + +#define VDEV_INT_DYN_CTL01 0x2100000 /* Begin at offset 33MB */ + +#define VDEV_INT_DYN_START (VDEV_INT_DYN_CTL01 + 0x1000) /* Begin at offset of 33MB + 4k to accommodate CTL01 register */ +#define VDEV_INT_DYN_CTL(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000)) +#define VDEV_INT_ITR_0(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x04) +#define VDEV_INT_ITR_1(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x08) +#define VDEV_INT_ITR_2(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x0C) + +/* Next offset to begin at 42MB (0x2A00000) */ +#endif /* _SIOV_REGS_H_ */ diff --git a/drivers/thirdparty/ice/virtchnl.h b/drivers/thirdparty/ice/virtchnl.h index dd7fade36bd3..1586d6c25a5c 100644 --- a/drivers/thirdparty/ice/virtchnl.h +++ b/drivers/thirdparty/ice/virtchnl.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2018-2021, Intel Corporation. */ - #ifndef _VIRTCHNL_H_ #define _VIRTCHNL_H_ @@ -23,7 +22,7 @@ * * The PF is required to return a status code in v_retval for all messages * except RESET_VF, which does not require any response. The returned value - * is of virtchnl_status_code type, defined in the shared type.h. + * is of virtchnl_status_code type, defined here. * * In general, VF driver initialization should roughly follow the order of * these opcodes. The VF driver must first validate the API version of the @@ -38,8 +37,21 @@ * value in current and future projects */ +/* These macros are used to generate compilation errors if a structure/union + * is not exactly the correct length. It gives a divide by zero error if the + * structure/union is not of the correct size, otherwise it creates an enum + * that is never used. + */ +#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } +#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ?
1 : 0) } -/* Error Codes */ +/* Error Codes + * Note that many older versions of various iAVF drivers convert the reported + * status code directly into an iavf_status enumeration. For this reason, it + * is important that the values of these enumerations line up. + */ enum virtchnl_status_code { VIRTCHNL_STATUS_SUCCESS = 0, VIRTCHNL_STATUS_ERR_PARAM = -5, @@ -86,6 +98,9 @@ enum virtchnl_rx_hsplit { VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, }; +enum virtchnl_bw_limit_type { + VIRTCHNL_BW_SHAPER = 0, +}; /* END GENERIC DEFINES */ /* Opcodes for VF-PF communication. These are placed in the v_opcode field @@ -97,6 +112,7 @@ enum virtchnl_ops { * VFs send requests to the PF using the other ops. * Use of "advanced opcode" features must be negotiated as part of capabilities * exchange and are not considered part of base mode feature set. + * */ VIRTCHNL_OP_UNKNOWN = 0, VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ @@ -135,6 +151,7 @@ enum virtchnl_ops { VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, /* opcode 34 is reserved */ + VIRTCHNL_OP_DCF_CONFIG_BW = 37, VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38, VIRTCHNL_OP_DCF_CMD_DESC = 39, VIRTCHNL_OP_DCF_CMD_BUFF = 40, @@ -156,11 +173,17 @@ enum virtchnl_ops { VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57, VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58, VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59, - /* opcodes 60 through 69 are reserved */ + /* opcodes 60 through 65 are reserved */ + VIRTCHNL_OP_GET_QOS_CAPS = 66, + VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67, + /* opcode 68 through 70 are reserved */ VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107, VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108, VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111, - VIRTCHNL_OP_DCF_RULE_FLUSH = 6000, + VIRTCHNL_OP_CONFIG_QUEUE_BW = 112, + VIRTCHNL_OP_CONFIG_QUANTA = 113, + VIRTCHNL_OP_FLOW_SUBSCRIBE = 114, + VIRTCHNL_OP_FLOW_UNSUBSCRIBE = 115, VIRTCHNL_OP_MAX, }; @@ -251,12 +274,6 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode) return "VIRTCHNL_OP_DEL_FDIR_FILTER"; case VIRTCHNL_OP_GET_MAX_RSS_QREGION: return "VIRTCHNL_OP_GET_MAX_RSS_QREGION"; - case VIRTCHNL_OP_ENABLE_QUEUES_V2: - return "VIRTCHNL_OP_ENABLE_QUEUES_V2"; - case VIRTCHNL_OP_DISABLE_QUEUES_V2: - return "VIRTCHNL_OP_DISABLE_QUEUES_V2"; - case VIRTCHNL_OP_MAP_QUEUE_VECTOR: - return "VIRTCHNL_OP_MAP_QUEUE_VECTOR"; case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS"; case VIRTCHNL_OP_ADD_VLAN_V2: @@ -275,6 +292,16 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode) return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2"; case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2: return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2"; + case VIRTCHNL_OP_ENABLE_QUEUES_V2: + return "VIRTCHNL_OP_ENABLE_QUEUES_V2"; + case VIRTCHNL_OP_DISABLE_QUEUES_V2: + return "VIRTCHNL_OP_DISABLE_QUEUES_V2"; + case VIRTCHNL_OP_MAP_QUEUE_VECTOR: + return "VIRTCHNL_OP_MAP_QUEUE_VECTOR"; + case VIRTCHNL_OP_FLOW_SUBSCRIBE: + return "VIRTCHNL_OP_FLOW_SUBSCRIBE"; + case VIRTCHNL_OP_FLOW_UNSUBSCRIBE: + return "VIRTCHNL_OP_FLOW_UNSUBSCRIBE"; case VIRTCHNL_OP_MAX: return "VIRTCHNL_OP_MAX"; default: @@ -282,15 +309,29 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode) } } -/* These macros are used to generate compilation errors if a structure/union - * is not exactly the correct length. It gives a divide by zero error if the - * structure/union is not of the correct size, otherwise it creates an enum - * that is never used. 
- */ -#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ - { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } -#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \ - { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) } +static inline const char *virtchnl_stat_str(enum virtchnl_status_code v_status) +{ + switch (v_status) { + case VIRTCHNL_STATUS_SUCCESS: + return "VIRTCHNL_STATUS_SUCCESS"; + case VIRTCHNL_STATUS_ERR_PARAM: + return "VIRTCHNL_STATUS_ERR_PARAM"; + case VIRTCHNL_STATUS_ERR_NO_MEMORY: + return "VIRTCHNL_STATUS_ERR_NO_MEMORY"; + case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH: + return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH"; + case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR: + return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR"; + case VIRTCHNL_STATUS_ERR_INVALID_VF_ID: + return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID"; + case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR: + return "VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR"; + case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED: + return "VIRTCHNL_STATUS_ERR_NOT_SUPPORTED"; + default: + return "Unknown status code (update virtchnl.h)"; + } +} /* Message descriptions and data structures. */ @@ -380,6 +421,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); /* BIT(8) is reserved */ #define VIRTCHNL_VF_LARGE_NUM_QPAIRS BIT(9) #define VIRTCHNL_VF_OFFLOAD_CRC BIT(10) +#define VIRTCHNL_VF_OFFLOAD_FSUB_PF BIT(14) #define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15) #define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16) #define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17) @@ -394,6 +436,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26) #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27) #define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28) +#define VIRTCHNL_VF_OFFLOAD_QOS BIT(29) #define VIRTCHNL_VF_CAP_DCF BIT(30) /* BIT(31) is reserved */ @@ -437,21 +480,14 @@ VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); /* RX descriptor IDs (range from 0 to 63) */ enum virtchnl_rx_desc_ids { VIRTCHNL_RXDID_0_16B_BASE = 0, - /* 32B_BASE and FLEX_SPLITQ share desc ids as default descriptors - * because they can be differentiated based on queue model; e.g. single - * queue model can only use 32B_BASE and split queue model can only use - * FLEX_SPLITQ. Having these as 1 allows them to be used as default - * descriptors without negotiation. 
- */
 VIRTCHNL_RXDID_1_32B_BASE = 1,
- VIRTCHNL_RXDID_1_FLEX_SPLITQ = 1,
 VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2,
 VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3,
 VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4,
 VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5,
 VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6,
 VIRTCHNL_RXDID_7_HW_RSVD = 7,
- /* 9 through 15 are reserved */
+ /* 8 through 15 are reserved */
 VIRTCHNL_RXDID_16_COMMS_GENERIC = 16,
 VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17,
 VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18,
@@ -465,7 +501,6 @@ enum virtchnl_rx_desc_ids {
 enum virtchnl_rx_desc_id_bitmasks {
 VIRTCHNL_RXDID_0_16B_BASE_M = BIT(VIRTCHNL_RXDID_0_16B_BASE),
 VIRTCHNL_RXDID_1_32B_BASE_M = BIT(VIRTCHNL_RXDID_1_32B_BASE),
- VIRTCHNL_RXDID_1_FLEX_SPLITQ_M = BIT(VIRTCHNL_RXDID_1_FLEX_SPLITQ),
 VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC),
 VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = BIT(VIRTCHNL_RXDID_3_FLEX_SQ_SW),
 VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = BIT(VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB),
@@ -1162,6 +1197,46 @@ struct virtchnl_rss_lut {
 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+/* enum virtchnl_hash_filter
+ *
+ * Bits defining the hash filters in the hena field of the virtchnl_rss_hena
+ * structure. Each bit indicates a specific hash filter for RSS.
+ *
+ * Note that not all bits are supported on all hardware. The VF should use
+ * VIRTCHNL_OP_GET_RSS_HENA_CAPS to determine which bits the PF is capable of
+ * before using VIRTCHNL_OP_SET_RSS_HENA to enable specific filters.
+ */
+enum virtchnl_hash_filter {
+ /* Bits 0 through 28 are reserved for future use */
+ /* Bits 29, 30, and 32 are not supported on XL710 and X710 */
+ VIRTCHNL_HASH_FILTER_UNICAST_IPV4_UDP = 29,
+ VIRTCHNL_HASH_FILTER_MULTICAST_IPV4_UDP = 30,
+ VIRTCHNL_HASH_FILTER_IPV4_UDP = 31,
+ VIRTCHNL_HASH_FILTER_IPV4_TCP_SYN_NO_ACK = 32,
+ VIRTCHNL_HASH_FILTER_IPV4_TCP = 33,
+ VIRTCHNL_HASH_FILTER_IPV4_SCTP = 34,
+ VIRTCHNL_HASH_FILTER_IPV4_OTHER = 35,
+ VIRTCHNL_HASH_FILTER_FRAG_IPV4 = 36,
+ /* Bits 37 and 38 are reserved for future use */
+ /* Bits 39, 40, and 42 are not supported on XL710 and X710 */
+ VIRTCHNL_HASH_FILTER_UNICAST_IPV6_UDP = 39,
+ VIRTCHNL_HASH_FILTER_MULTICAST_IPV6_UDP = 40,
+ VIRTCHNL_HASH_FILTER_IPV6_UDP = 41,
+ VIRTCHNL_HASH_FILTER_IPV6_TCP_SYN_NO_ACK = 42,
+ VIRTCHNL_HASH_FILTER_IPV6_TCP = 43,
+ VIRTCHNL_HASH_FILTER_IPV6_SCTP = 44,
+ VIRTCHNL_HASH_FILTER_IPV6_OTHER = 45,
+ VIRTCHNL_HASH_FILTER_FRAG_IPV6 = 46,
+ /* Bit 47 is reserved for future use */
+ VIRTCHNL_HASH_FILTER_FCOE_OX = 48,
+ VIRTCHNL_HASH_FILTER_FCOE_RX = 49,
+ VIRTCHNL_HASH_FILTER_FCOE_OTHER = 50,
+ /* Bits 51 through 62 are reserved for future use */
+ VIRTCHNL_HASH_FILTER_L2_PAYLOAD = 63,
+};
+
+#define VIRTCHNL_HASH_FILTER_INVALID (0)
+
 /* VIRTCHNL_OP_GET_RSS_HENA_CAPS
 * VIRTCHNL_OP_SET_RSS_HENA
 * VF sends these messages to get and set the hash filter enable bits for RSS.
@@ -1170,6 +1245,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
 * traffic types that are hashed by the hardware.
 */
 struct virtchnl_rss_hena {
+ /* see enum virtchnl_hash_filter */
 u64 hena;
 };
@@ -1281,6 +1357,14 @@ struct virtchnl_filter {
 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+struct virtchnl_shaper_bw {
+ /* Unit is Kbps */
+ u32 committed;
+ u32 peak;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
+
 /* VIRTCHNL_OP_DCF_GET_VSI_MAP
 * VF sends this message to get VSI mapping table.
 * PF responds with an indirect message containing VF's
@@ -1353,6 +1437,37 @@ struct virtchnl_dcf_vlan_offload {
 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_dcf_vlan_offload);
+struct virtchnl_dcf_bw_cfg {
+ u8 tc_num;
+#define VIRTCHNL_DCF_BW_CIR BIT(0)
+#define VIRTCHNL_DCF_BW_PIR BIT(1)
+ u8 bw_type;
+ u8 pad[2];
+ enum virtchnl_bw_limit_type type;
+ union {
+ struct virtchnl_shaper_bw shaper;
+ u8 pad2[32];
+ };
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_dcf_bw_cfg);
+
+/* VIRTCHNL_OP_DCF_CONFIG_BW
+ * VF sends this message to set the bandwidth configuration of each
+ * TC with a specific vf id. The node_type field indicates whether
+ * this message configures VSI node or TC node bandwidth.
+ */
+struct virtchnl_dcf_bw_cfg_list {
+ u16 vf_id;
+ u8 num_elem;
+#define VIRTCHNL_DCF_TARGET_TC_BW 0
+#define VIRTCHNL_DCF_TARGET_VF_BW 1
+ u8 node_type;
+ struct virtchnl_dcf_bw_cfg cfg[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_dcf_bw_cfg_list);
+
 struct virtchnl_supported_rxdids {
 /* see enum virtchnl_rx_desc_id_bitmasks */
 u64 supported_rxdids;
@@ -1371,7 +1486,6 @@ enum virtchnl_event_codes {
 VIRTCHNL_EVENT_RESET_IMPENDING,
 VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
 VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE,
- VIRTCHNL_EVENT_DCF_VSI_INFO = 1000,
 };
 #define PF_EVENT_SEVERITY_INFO 0
@@ -1400,13 +1514,6 @@ struct virtchnl_pf_event {
 u8 link_status;
 u8 pad[3];
 } link_event_adv;
- struct {
- /* link_speed provided in Mbps */
- u32 link_speed;
- u16 vport_id;
- u8 link_status;
- u8 pad;
- } link_event_adv_vport;
 struct {
 u16 vf_id;
 u16 vsi_id;
@@ -1450,7 +1557,6 @@ struct virtchnl_rdma_qvlist_info {
 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_rdma_qvlist_info);
-
 /* VF reset states - these are written into the RSTAT register:
 * VFGEN_RSTAT on the VF
 * When the PF initiates a reset, it writes 0
@@ -1469,6 +1575,8 @@ enum virtchnl_vfr_states {
 };
 #define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK 16
+#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
 #define PROTO_HDR_SHIFT 5
 #define PROTO_HDR_FIELD_START(proto_hdr_type) \
 (proto_hdr_type << PROTO_HDR_SHIFT)
@@ -1540,6 +1648,7 @@ enum virtchnl_proto_hdr_type {
 */
 VIRTCHNL_PROTO_HDR_IPV4_FRAG,
 VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
+ VIRTCHNL_PROTO_HDR_GRE,
 };
 /* Protocol header field within a protocol header.
*/ @@ -1562,6 +1671,7 @@ enum virtchnl_proto_hdr_field { VIRTCHNL_PROTO_HDR_IPV4_DSCP, VIRTCHNL_PROTO_HDR_IPV4_TTL, VIRTCHNL_PROTO_HDR_IPV4_PROT, + VIRTCHNL_PROTO_HDR_IPV4_CHKSUM, /* IPV6 */ VIRTCHNL_PROTO_HDR_IPV6_SRC = PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6), @@ -1586,14 +1696,17 @@ enum virtchnl_proto_hdr_field { VIRTCHNL_PROTO_HDR_TCP_SRC_PORT = PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP), VIRTCHNL_PROTO_HDR_TCP_DST_PORT, + VIRTCHNL_PROTO_HDR_TCP_CHKSUM, /* UDP */ VIRTCHNL_PROTO_HDR_UDP_SRC_PORT = PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP), VIRTCHNL_PROTO_HDR_UDP_DST_PORT, + VIRTCHNL_PROTO_HDR_UDP_CHKSUM, /* SCTP */ VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT = PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP), VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, + VIRTCHNL_PROTO_HDR_SCTP_CHKSUM, /* GTPU_IP */ VIRTCHNL_PROTO_HDR_GTPU_IP_TEID = PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP), @@ -1630,6 +1743,15 @@ enum virtchnl_proto_hdr_field { /* IPv6 Extension Fragment */ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID = PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG), + /* GTPU_DWN/UP */ + VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN), + VIRTCHNL_PROTO_HDR_GTPU_UP_QFI = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP), + /* L2TPv2 */ + VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2), + VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID, }; struct virtchnl_proto_hdr { @@ -1646,17 +1768,52 @@ struct virtchnl_proto_hdr { VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr); +struct virtchnl_proto_hdr_w_msk { + /* see enum virtchnl_proto_hdr_type */ + s32 type; + u32 pad; + /** + * binary buffer in network order for specific header type. + * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, a IPv4 + * header is expected to be copied into the buffer. + */ + u8 buffer_spec[64]; + /* binary buffer for bit-mask applied to specific header type */ + u8 buffer_mask[64]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk); + struct virtchnl_proto_hdrs { u8 tunnel_level; /** * specify where protocol header start from. + * must be 0 when sending a raw packet request. * 0 - from the outer layer * 1 - from the first inner layer * 2 - from the second inner layer * .... - **/ - int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */ - struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS]; + */ + int count; + /** + * count must <= + * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK + * count = 0 : select raw + * 1 < count <= VIRTCHNL_MAX_NUM_PROTO_HDRS : select proto_hdr + * count > VIRTCHNL_MAX_NUM_PROTO_HDRS : select proto_hdr_w_msk + * last valid index = count - VIRTCHNL_MAX_NUM_PROTO_HDRS + */ + union { + struct virtchnl_proto_hdr + proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS]; + struct virtchnl_proto_hdr_w_msk + proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK]; + struct { + u16 pkt_len; + u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET]; + u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET]; + } raw; + }; }; VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs); @@ -1671,7 +1828,7 @@ struct virtchnl_rss_cfg { VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg); -/* action configuration for FDIR */ +/* action configuration for FDIR and FSUB */ struct virtchnl_filter_action { /* see enum virtchnl_action type */ s32 type; @@ -1789,21 +1946,146 @@ struct virtchnl_fdir_del { VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); -/* TX and RX queue types are valid in legacy as well as split queue models. 
- * With Split Queue model, 2 additional types are introduced - TX_COMPLETION
- * and RX_BUFFER. In split queue model, RX corresponds to the queue where HW
- * posts completions.
+/* Status returned to VF after VF requests FSUB commands
+ * VIRTCHNL_FSUB_SUCCESS
+ * VF FLOW related request is successfully done by PF
+ * The request can be OP_FLOW_SUBSCRIBE/UNSUBSCRIBE.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE
+ * OP_FLOW_SUBSCRIBE request failed due to lack of hardware resources.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_EXIST
+ * OP_FLOW_SUBSCRIBE request failed because the rule already exists.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST
+ * OP_FLOW_UNSUBSCRIBE request failed because this rule doesn't exist.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_INVALID
+ * OP_FLOW_SUBSCRIBE request failed due to a parameter validation error
+ * or missing HW support.
 */
+enum virtchnl_fsub_prgm_status {
+ VIRTCHNL_FSUB_SUCCESS = 0,
+ VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE,
+ VIRTCHNL_FSUB_FAILURE_RULE_EXIST,
+ VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST,
+ VIRTCHNL_FSUB_FAILURE_RULE_INVALID,
+};
+
+/* VIRTCHNL_OP_FLOW_SUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only, priority, proto_hdrs and actions.
+ * PF will return flow_id
+ * if the request is successfully done and return status to VF.
+ */
+struct virtchnl_flow_sub {
+ u16 vsi_id; /* INPUT */
+ u8 validate_only; /* INPUT */
+ /* 0 is the highest priority; INPUT */
+ u8 priority;
+ u32 flow_id; /* OUTPUT */
+ struct virtchnl_proto_hdrs proto_hdrs; /* INPUT */
+ struct virtchnl_filter_action_set actions; /* INPUT */
+ /* see enum virtchnl_fsub_prgm_status; OUTPUT */
+ s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_flow_sub);
+
+/* VIRTCHNL_OP_FLOW_UNSUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return status to VF.
+ */
+struct virtchnl_flow_unsub {
+ u16 vsi_id; /* INPUT */
+ u16 pad;
+ u32 flow_id; /* INPUT */
+ /* see enum virtchnl_fsub_prgm_status; OUTPUT */
+ s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_flow_unsub);
+
+/* VIRTCHNL_OP_GET_QOS_CAPS
+ * VF sends this message to get its QoS Caps, such as
+ * TC number, Arbiter and Bandwidth.
+ */
+struct virtchnl_qos_cap_elem {
+ u8 tc_num;
+ u8 tc_prio;
+#define VIRTCHNL_ABITER_STRICT 0
+#define VIRTCHNL_ABITER_ETS 2
+ u8 arbiter;
+#define VIRTCHNL_STRICT_WEIGHT 1
+ u8 weight;
+ enum virtchnl_bw_limit_type type;
+ union {
+ struct virtchnl_shaper_bw shaper;
+ u8 pad2[32];
+ };
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
+
+struct virtchnl_qos_cap_list {
+ u16 vsi_id;
+ u16 num_elem;
+ struct virtchnl_qos_cap_elem cap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_qos_cap_list);
+
+/* VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP
+ * VF sends the virtchnl_queue_tc_mapping message to set the queue to TC
+ * mapping for all the Tx and Rx queues of a specified VSI, and
+ * gets a response with a bitmap of valid user priorities
+ * associated with the queues.
+ */ +struct virtchnl_queue_tc_mapping { + u16 vsi_id; + u16 num_tc; + u16 num_queue_pairs; + u8 pad[2]; + union { + struct { + u16 start_queue_id; + u16 queue_count; + } req; + struct { +#define VIRTCHNL_USER_PRIO_TYPE_UP 0 +#define VIRTCHNL_USER_PRIO_TYPE_DSCP 1 + u16 prio_type; + u16 valid_prio_bitmap; + } resp; + } tc[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping); + +/* VIRTCHNL_OP_CONFIG_QUEUE_BW */ +struct virtchnl_queue_bw { + u16 queue_id; + u8 tc; + u8 pad; + struct virtchnl_shaper_bw shaper; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw); + +struct virtchnl_queues_bw_cfg { + u16 vsi_id; + u16 num_queues; + struct virtchnl_queue_bw cfg[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg); + +/* queue types */ enum virtchnl_queue_type { VIRTCHNL_QUEUE_TYPE_TX = 0, VIRTCHNL_QUEUE_TYPE_RX = 1, - VIRTCHNL_QUEUE_TYPE_TX_COMPLETION = 2, - VIRTCHNL_QUEUE_TYPE_RX_BUFFER = 3, - VIRTCHNL_QUEUE_TYPE_CONFIG_TX = 4, - VIRTCHNL_QUEUE_TYPE_CONFIG_RX = 5 }; - /* structure to specify a chunk of contiguous queues */ struct virtchnl_queue_chunk { /* see enum virtchnl_queue_type */ @@ -1823,22 +2105,15 @@ struct virtchnl_queue_chunks { VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks); - /* VIRTCHNL_OP_ENABLE_QUEUES_V2 * VIRTCHNL_OP_DISABLE_QUEUES_V2 - * VIRTCHNL_OP_DEL_QUEUES * - * If VIRTCHNL version was negotiated in VIRTCHNL_OP_VERSION as 2.0 - * then all of these ops are available. + * These opcodes can be used if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in + * VIRTCHNL_OP_GET_VF_RESOURCES * - * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES - * then VIRTCHNL_OP_ENABLE_QUEUES_V2 and VIRTCHNL_OP_DISABLE_QUEUES_V2 are - * available. - * - * PF sends these messages to enable, disable or delete queues specified in - * chunks. PF sends virtchnl_del_ena_dis_queues struct to specify the queues - * to be enabled/disabled/deleted. Also applicable to single queue RX or - * TX. CP performs requested action and returns status. + * VF sends virtchnl_ena_dis_queues struct to specify the queues to be + * enabled/disabled in chunks. Also applicable to single queue RX or + * TX. PF performs requested action and returns status. */ struct virtchnl_del_ena_dis_queues { u16 vport_id; @@ -1872,13 +2147,13 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector); /* VIRTCHNL_OP_MAP_QUEUE_VECTOR * - * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES - * then only VIRTCHNL_OP_MAP_QUEUE_VECTOR is available. + * This opcode can be used only if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated + * in VIRTCHNL_OP_GET_VF_RESOURCES * - * PF sends this message to map or unmap queues to vectors and ITR index - * registers. External data buffer contains virtchnl_queue_vector_maps structure + * VF sends this message to map queues to vectors and ITR index registers. + * External data buffer contains virtchnl_queue_vector_maps structure * that contains num_qv_maps of virtchnl_queue_vector structures. - * CP maps the requested queue vector maps after validating the queue and vector + * PF maps the requested queue vector maps after validating the queue and vector * ids and returns a status code. 
*/ struct virtchnl_queue_vector_maps { @@ -1890,7 +2165,12 @@ struct virtchnl_queue_vector_maps { VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps); +struct virtchnl_quanta_cfg { + u16 quanta_size; + struct virtchnl_queue_chunk queue_select; +}; +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg); /* Since VF messages are limited by u16 size, precalculate the maximum possible * values of nested elements in virtchnl structures that virtual channel can @@ -2144,11 +2424,23 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, */ valid_len = msglen; break; - case VIRTCHNL_OP_DCF_RULE_FLUSH: case VIRTCHNL_OP_DCF_DISABLE: case VIRTCHNL_OP_DCF_GET_VSI_MAP: case VIRTCHNL_OP_DCF_GET_PKG_INFO: break; + case VIRTCHNL_OP_DCF_CONFIG_BW: + valid_len = sizeof(struct virtchnl_dcf_bw_cfg_list); + if (msglen >= valid_len) { + struct virtchnl_dcf_bw_cfg_list *cfg_list = + (struct virtchnl_dcf_bw_cfg_list *)msg; + if (cfg_list->num_elem == 0) { + err_msg_format = true; + break; + } + valid_len += (cfg_list->num_elem - 1) * + sizeof(struct virtchnl_dcf_bw_cfg); + } + break; case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS: break; case VIRTCHNL_OP_ADD_RSS_CFG: @@ -2161,6 +2453,52 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, case VIRTCHNL_OP_DEL_FDIR_FILTER: valid_len = sizeof(struct virtchnl_fdir_del); break; + case VIRTCHNL_OP_FLOW_SUBSCRIBE: + valid_len = sizeof(struct virtchnl_flow_sub); + break; + case VIRTCHNL_OP_FLOW_UNSUBSCRIBE: + valid_len = sizeof(struct virtchnl_flow_unsub); + break; + case VIRTCHNL_OP_GET_QOS_CAPS: + break; + case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP: + valid_len = sizeof(struct virtchnl_queue_tc_mapping); + if (msglen >= valid_len) { + struct virtchnl_queue_tc_mapping *q_tc = + (struct virtchnl_queue_tc_mapping *)msg; + if (q_tc->num_tc == 0) { + err_msg_format = true; + break; + } + valid_len += (q_tc->num_tc - 1) * + sizeof(q_tc->tc[0]); + } + break; + case VIRTCHNL_OP_CONFIG_QUEUE_BW: + valid_len = sizeof(struct virtchnl_queues_bw_cfg); + if (msglen >= valid_len) { + struct virtchnl_queues_bw_cfg *q_bw = + (struct virtchnl_queues_bw_cfg *)msg; + if (q_bw->num_queues == 0) { + err_msg_format = true; + break; + } + valid_len += (q_bw->num_queues - 1) * + sizeof(q_bw->cfg[0]); + } + break; + case VIRTCHNL_OP_CONFIG_QUANTA: + valid_len = sizeof(struct virtchnl_quanta_cfg); + if (msglen >= valid_len) { + struct virtchnl_quanta_cfg *q_quanta = + (struct virtchnl_quanta_cfg *)msg; + if (q_quanta->quanta_size == 0 || + q_quanta->queue_select.num_queues == 0) { + err_msg_format = true; + break; + } + } + break; case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: break; case VIRTCHNL_OP_ADD_VLAN_V2: diff --git a/drivers/thirdparty/ice/virtchnl_inline_ipsec.h b/drivers/thirdparty/ice/virtchnl_inline_ipsec.h deleted file mode 100644 index eec608dde607..000000000000 --- a/drivers/thirdparty/ice/virtchnl_inline_ipsec.h +++ /dev/null @@ -1,548 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (C) 2018-2021, Intel Corporation. 
*/ - - -#ifndef _VIRTCHNL_INLINE_IPSEC_H_ -#define _VIRTCHNL_INLINE_IPSEC_H_ - -#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM 3 -#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM 16 -#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM 128 -#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER 2 -#define VIRTCHNL_IPSEC_MAX_KEY_LEN 128 -#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM 8 -#define VIRTCHNL_IPSEC_SA_DESTROY 0 -#define VIRTCHNL_IPSEC_BROADCAST_VFID 0xFFFFFFFF -#define VIRTCHNL_IPSEC_INVALID_REQ_ID 0xFFFF -#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP 0xFFFFFFFF -#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP 0xFFFFFFFF - -/* crypto type */ -#define VIRTCHNL_AUTH 1 -#define VIRTCHNL_CIPHER 2 -#define VIRTCHNL_AEAD 3 - -/* caps enabled */ -#define VIRTCHNL_IPSEC_ESN_ENA BIT(0) -#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA BIT(1) -#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA BIT(2) -#define VIRTCHNL_IPSEC_AUDIT_ENA BIT(3) -#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA BIT(4) -#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA BIT(5) -#define VIRTCHNL_IPSEC_ARW_CHECK_ENA BIT(6) -#define VIRTCHNL_IPSEC_24BIT_SPI_ENA BIT(7) - -/* algorithm type */ -/* Hash Algorithm */ -#define VIRTCHNL_HASH_NO_ALG 0 /* NULL algorithm */ -#define VIRTCHNL_AES_CBC_MAC 1 /* AES-CBC-MAC algorithm */ -#define VIRTCHNL_AES_CMAC 2 /* AES CMAC algorithm */ -#define VIRTCHNL_AES_GMAC 3 /* AES GMAC algorithm */ -#define VIRTCHNL_AES_XCBC_MAC 4 /* AES XCBC algorithm */ -#define VIRTCHNL_MD5_HMAC 5 /* HMAC using MD5 algorithm */ -#define VIRTCHNL_SHA1_HMAC 6 /* HMAC using 128 bit SHA algorithm */ -#define VIRTCHNL_SHA224_HMAC 7 /* HMAC using 224 bit SHA algorithm */ -#define VIRTCHNL_SHA256_HMAC 8 /* HMAC using 256 bit SHA algorithm */ -#define VIRTCHNL_SHA384_HMAC 9 /* HMAC using 384 bit SHA algorithm */ -#define VIRTCHNL_SHA512_HMAC 10 /* HMAC using 512 bit SHA algorithm */ -#define VIRTCHNL_SHA3_224_HMAC 11 /* HMAC using 224 bit SHA3 algorithm */ -#define VIRTCHNL_SHA3_256_HMAC 12 /* HMAC using 256 bit SHA3 algorithm */ -#define VIRTCHNL_SHA3_384_HMAC 13 /* HMAC using 384 bit SHA3 algorithm */ -#define VIRTCHNL_SHA3_512_HMAC 14 /* HMAC using 512 bit SHA3 algorithm */ -/* Cipher Algorithm */ -#define VIRTCHNL_CIPHER_NO_ALG 15 /* NULL algorithm */ -#define VIRTCHNL_3DES_CBC 16 /* Triple DES algorithm in CBC mode */ -#define VIRTCHNL_AES_CBC 17 /* AES algorithm in CBC mode */ -#define VIRTCHNL_AES_CTR 18 /* AES algorithm in Counter mode */ -/* AEAD Algorithm */ -#define VIRTCHNL_AES_CCM 19 /* AES algorithm in CCM mode */ -#define VIRTCHNL_AES_GCM 20 /* AES algorithm in GCM mode */ -#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */ - -/* protocol type */ -#define VIRTCHNL_PROTO_ESP 1 -#define VIRTCHNL_PROTO_AH 2 -#define VIRTCHNL_PROTO_RSVD1 3 - -/* sa mode */ -#define VIRTCHNL_SA_MODE_TRANSPORT 1 -#define VIRTCHNL_SA_MODE_TUNNEL 2 -#define VIRTCHNL_SA_MODE_TRAN_TUN 3 -#define VIRTCHNL_SA_MODE_UNKNOWN 4 - -/* sa direction */ -#define VIRTCHNL_DIR_INGRESS 1 -#define VIRTCHNL_DIR_EGRESS 2 -#define VIRTCHNL_DIR_INGRESS_EGRESS 3 - -/* sa termination */ -#define VIRTCHNL_TERM_SOFTWARE 1 -#define VIRTCHNL_TERM_HARDWARE 2 - -/* sa ip type */ -#define VIRTCHNL_IPV4 1 -#define VIRTCHNL_IPV6 2 - -/* for virtchnl_ipsec_resp */ -enum inline_ipsec_resp { - INLINE_IPSEC_SUCCESS = 0, - INLINE_IPSEC_FAIL = -1, - INLINE_IPSEC_ERR_FIFO_FULL = -2, - INLINE_IPSEC_ERR_NOT_READY = -3, - INLINE_IPSEC_ERR_VF_DOWN = -4, - INLINE_IPSEC_ERR_INVALID_PARAMS = -5, - INLINE_IPSEC_ERR_NO_MEM = -6, -}; - -/* Detailed opcodes for DPDK and IPsec use */ -enum inline_ipsec_ops { - 
INLINE_IPSEC_OP_GET_CAP = 0, - INLINE_IPSEC_OP_GET_STATUS = 1, - INLINE_IPSEC_OP_SA_CREATE = 2, - INLINE_IPSEC_OP_SA_UPDATE = 3, - INLINE_IPSEC_OP_SA_DESTROY = 4, - INLINE_IPSEC_OP_SP_CREATE = 5, - INLINE_IPSEC_OP_SP_DESTROY = 6, - INLINE_IPSEC_OP_SA_READ = 7, - INLINE_IPSEC_OP_EVENT = 8, - INLINE_IPSEC_OP_RESP = 9, -}; - -/* Not all valid, if certain field is invalid, set 1 for all bits */ -struct virtchnl_algo_cap { - u32 algo_type; - - u16 block_size; - - u16 min_key_size; - u16 max_key_size; - u16 inc_key_size; - - u16 min_iv_size; - u16 max_iv_size; - u16 inc_iv_size; - - u16 min_digest_size; - u16 max_digest_size; - u16 inc_digest_size; - - u16 min_aad_size; - u16 max_aad_size; - u16 inc_aad_size; -} __packed; - -/* vf record the capability of crypto from the virtchnl */ -struct virtchnl_sym_crypto_cap { - u8 crypto_type; - u8 algo_cap_num; - struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM]; -}; - -/* VIRTCHNL_OP_GET_IPSEC_CAP - * VF pass virtchnl_ipsec_cap to PF - * and PF return capability of ipsec from virtchnl. - */ -struct virtchnl_ipsec_cap { - /* max number of SA per VF */ - u16 max_sa_num; - - /* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */ - u8 virtchnl_protocol_type; - - /* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */ - u8 virtchnl_sa_mode; - - /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */ - u8 virtchnl_direction; - - /* termination mode - value ref VIRTCHNL_TERM_XXX */ - u8 termination_mode; - - /* number of supported crypto capability */ - u8 crypto_cap_num; - - /* descriptor ID */ - u16 desc_id; - - /* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */ - u32 caps_enabled; - - /* crypto capabilities */ - struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM]; -} __packed; - -/* configuration of crypto function */ -struct virtchnl_ipsec_crypto_cfg_item { - u8 crypto_type; - - u32 algo_type; - - /* Length of valid IV data. */ - u16 iv_len; - - /* Length of digest */ - u16 digest_len; - - /* SA salt */ - u32 salt; - - /* The length of the symmetric key */ - u16 key_len; - - /* key data buffer */ - u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN]; -} __packed; - -struct virtchnl_ipsec_sym_crypto_cfg { - struct virtchnl_ipsec_crypto_cfg_item - items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER]; -}; - -/* VIRTCHNL_OP_IPSEC_SA_CREATE - * VF send this SA configuration to PF using virtchnl; - * PF create SA as configuration and PF driver will return - * an unique index (sa_idx) for the created SA. - */ -struct virtchnl_ipsec_sa_cfg { - /* IPsec SA Protocol - AH/ESP */ - u8 virtchnl_protocol_type; - - /* termination mode - value ref VIRTCHNL_TERM_XXX */ - u8 virtchnl_termination; - - /* type of outer IP - IPv4/IPv6 */ - u8 virtchnl_ip_type; - - /* type of esn - !0:enable/0:disable */ - u8 esn_enabled; - - /* udp encap - !0:enable/0:disable */ - u8 udp_encap_enabled; - - /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */ - u8 virtchnl_direction; - - /* reserved */ - u8 reserved1; - - /* SA security parameter index */ - u32 spi; - - /* outer src ip address */ - u8 src_addr[16]; - - /* outer dst ip address */ - u8 dst_addr[16]; - - /* SPD reference. Used to link an SA with its policy. - * PF drivers may ignore this field. 
- */ - u16 spd_ref; - - /* high 32 bits of esn */ - u32 esn_hi; - - /* low 32 bits of esn */ - u32 esn_low; - - /* When enabled, sa_index must be valid */ - u8 sa_index_en; - - /* SA index when sa_index_en is true */ - u32 sa_index; - - /* auditing mode - enable/disable */ - u8 audit_en; - - /* lifetime byte limit - enable/disable - * When enabled, byte_limit_hard and byte_limit_soft - * must be valid. - */ - u8 byte_limit_en; - - /* hard byte limit count */ - u64 byte_limit_hard; - - /* soft byte limit count */ - u64 byte_limit_soft; - - /* drop on authentication failure - enable/disable */ - u8 drop_on_auth_fail_en; - - /* anti-reply window check - enable/disable - * When enabled, arw_size must be valid. - */ - u8 arw_check_en; - - /* size of arw window, offset by 1. Setting to 0 - * represents ARW window size of 1. Setting to 127 - * represents ARW window size of 128 - */ - u8 arw_size; - - /* no ip offload mode - enable/disable - * When enabled, ip type and address must not be valid. - */ - u8 no_ip_offload_en; - - /* SA Domain. Used to logical separate an SADB into groups. - * PF drivers supporting a single group ignore this field. - */ - u16 sa_domain; - - /* crypto configuration */ - struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg; -} __packed; - -/* VIRTCHNL_OP_IPSEC_SA_UPDATE - * VF send configuration of index of SA to PF - * PF will update SA according to configuration - */ -struct virtchnl_ipsec_sa_update { - u32 sa_index; /* SA to update */ - u32 esn_hi; /* high 32 bits of esn */ - u32 esn_low; /* low 32 bits of esn */ -}; - -/* VIRTCHNL_OP_IPSEC_SA_DESTROY - * VF send configuration of index of SA to PF - * PF will destroy SA according to configuration - * flag bitmap indicate all SA or just selected SA will - * be destroyed - */ -struct virtchnl_ipsec_sa_destroy { - /* All zero bitmap indicates all SA will be destroyed. - * Non-zero bitmap indicates the selected SA in - * array sa_index will be destroyed. - */ - u8 flag; - - /* selected SA index */ - u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM]; -} __packed; - -/* VIRTCHNL_OP_IPSEC_SA_READ - * VF send this SA configuration to PF using virtchnl; - * PF read SA and will return configuration for the created SA. - */ -struct virtchnl_ipsec_sa_read { - /* SA valid - invalid/valid */ - u8 valid; - - /* SA active - inactive/active */ - u8 active; - - /* SA SN rollover - not_rollover/rollover */ - u8 sn_rollover; - - /* IPsec SA Protocol - AH/ESP */ - u8 virtchnl_protocol_type; - - /* termination mode - value ref VIRTCHNL_TERM_XXX */ - u8 virtchnl_termination; - - /* auditing mode - enable/disable */ - u8 audit_en; - - /* lifetime byte limit - enable/disable - * When set to limit, byte_limit_hard and byte_limit_soft - * must be valid. - */ - u8 byte_limit_en; - - /* hard byte limit count */ - u64 byte_limit_hard; - - /* soft byte limit count */ - u64 byte_limit_soft; - - /* drop on authentication failure - enable/disable */ - u8 drop_on_auth_fail_en; - - /* anti-replay window check - enable/disable - * When set to check, arw_size, arw_top, and arw must be valid - */ - u8 arw_check_en; - - /* size of arw window, offset by 1. Setting to 0 - * represents ARW window size of 1. 
Setting to 127 - * represents ARW window size of 128 - */ - u8 arw_size; - - /* reserved */ - u8 reserved1; - - /* top of anti-replay-window */ - u64 arw_top; - - /* anti-replay-window */ - u8 arw[16]; - - /* packets processed */ - u64 packets_processed; - - /* bytes processed */ - u64 bytes_processed; - - /* packets dropped */ - u32 packets_dropped; - - /* authentication failures */ - u32 auth_fails; - - /* ARW check failures */ - u32 arw_fails; - - /* type of esn - enable/disable */ - u8 esn; - - /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */ - u8 virtchnl_direction; - - /* SA security parameter index */ - u32 spi; - - /* SA salt */ - u32 salt; - - /* high 32 bits of esn */ - u32 esn_hi; - - /* low 32 bits of esn */ - u32 esn_low; - - /* SA Domain. Used to logical separate an SADB into groups. - * PF drivers supporting a single group ignore this field. - */ - u16 sa_domain; - - /* SPD reference. Used to link an SA with its policy. - * PF drivers may ignore this field. - */ - u16 spd_ref; - - /* crypto configuration. Salt and keys are set to 0 */ - struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg; -} __packed; - -/* Add allowlist entry in IES */ -struct virtchnl_ipsec_sp_cfg { - u32 spi; - u32 dip[4]; - - /* Drop frame if true or redirect to QAT if false. */ - u8 drop; - - /* Congestion domain. For future use. */ - u8 cgd; - - /* 0 for IPv4 table, 1 for IPv6 table. */ - u8 table_id; - - /* Set TC (congestion domain) if true. For future use. */ - u8 set_tc; -}; - -/* Delete allowlist entry in IES */ -struct virtchnl_ipsec_sp_destroy { - /* 0 for IPv4 table, 1 for IPv6 table. */ - u8 table_id; - u32 rule_id; -} __packed; - -/* Response from IES to allowlist operations */ -struct virtchnl_ipsec_sp_cfg_resp { - u32 rule_id; -}; - -struct virtchnl_ipsec_sa_cfg_resp { - u32 sa_handle; -}; - -#define INLINE_IPSEC_EVENT_RESET 0x1 -#define INLINE_IPSEC_EVENT_CRYPTO_ON 0x2 -#define INLINE_IPSEC_EVENT_CRYPTO_OFF 0x4 - -struct virtchnl_ipsec_event { - u32 ipsec_event_data; -}; - -#define INLINE_IPSEC_STATUS_AVAILABLE 0x1 -#define INLINE_IPSEC_STATUS_UNAVAILABLE 0x2 - -struct virtchnl_ipsec_status { - u32 status; -}; - -struct virtchnl_ipsec_resp { - u32 resp; -}; - -/* Internal message descriptor for VF <-> IPsec communication */ -struct inline_ipsec_msg { - u16 ipsec_opcode; - u16 req_id; - - union { - /* IPsec request */ - struct virtchnl_ipsec_sa_cfg sa_cfg[0]; - struct virtchnl_ipsec_sp_cfg sp_cfg[0]; - struct virtchnl_ipsec_sa_update sa_update[0]; - struct virtchnl_ipsec_sa_destroy sa_destroy[0]; - struct virtchnl_ipsec_sp_destroy sp_destroy[0]; - - /* IPsec response */ - struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0]; - struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0]; - struct virtchnl_ipsec_cap ipsec_cap[0]; - struct virtchnl_ipsec_status ipsec_status[0]; - /* response to del_sa, del_sp, update_sa */ - struct virtchnl_ipsec_resp ipsec_resp[0]; - - /* IPsec event (no req_id is required) */ - struct virtchnl_ipsec_event event[0]; - - /* Reserved */ - struct virtchnl_ipsec_sa_read sa_read[0]; - } ipsec_data; -}; - -static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode) -{ - u16 valid_len = sizeof(struct inline_ipsec_msg); - - switch (opcode) { - case INLINE_IPSEC_OP_GET_CAP: - case INLINE_IPSEC_OP_GET_STATUS: - break; - case INLINE_IPSEC_OP_SA_CREATE: - valid_len += sizeof(struct virtchnl_ipsec_sa_cfg); - break; - case INLINE_IPSEC_OP_SP_CREATE: - valid_len += sizeof(struct virtchnl_ipsec_sp_cfg); - break; - case INLINE_IPSEC_OP_SA_UPDATE: - valid_len += sizeof(struct 
virtchnl_ipsec_sa_update); - break; - case INLINE_IPSEC_OP_SA_DESTROY: - valid_len += sizeof(struct virtchnl_ipsec_sa_destroy); - break; - case INLINE_IPSEC_OP_SP_DESTROY: - valid_len += sizeof(struct virtchnl_ipsec_sp_destroy); - break; - /* Only for msg length caculation of response to VF in case of - * inline ipsec failure. - */ - case INLINE_IPSEC_OP_RESP: - valid_len += sizeof(struct virtchnl_ipsec_resp); - break; - default: - valid_len = 0; - break; - } - - return valid_len; -} - -#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */ diff --git a/drivers/thirdparty/ice/virtchnl_lan_desc.h b/drivers/thirdparty/ice/virtchnl_lan_desc.h deleted file mode 100644 index 5ac587008da7..000000000000 --- a/drivers/thirdparty/ice/virtchnl_lan_desc.h +++ /dev/null @@ -1,528 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (C) 2018-2021, Intel Corporation. */ - -/* - * Copyright (C) 2019 Intel Corporation - * - * For licensing information, see the file 'LICENSE' in the root folder - */ -#ifndef _VIRTCHNL_LAN_DESC_H_ -#define _VIRTCHNL_LAN_DESC_H_ - -/* Rx */ -/* For virtchnl_splitq_base_rx_flex desc members */ -#define VIRTCHNL_RXD_FLEX_RXDID_S 0 -#define VIRTCHNL_RXD_FLEX_RXDID_M \ - ICE_M(0xFUL, VIRTCHNL_RXD_FLEX_RXDID_S) -#define VIRTCHNL_RXD_FLEX_PTYPE_S 0 -#define VIRTCHNL_RXD_FLEX_PTYPE_M \ - ICE_M(0x3FFUL, VIRTCHNL_RXD_FLEX_PTYPE_S) -#define VIRTCHNL_RXD_FLEX_UMBCAST_S 10 -#define VIRTCHNL_RXD_FLEX_UMBCAST_M \ - ICE_M(0x3UL, VIRTCHNL_RXD_FLEX_UMBCAST_S) -#define VIRTCHNL_RXD_FLEX_FF0_S 12 -#define VIRTCHNL_RXD_FLEX_FF0_M ICE_M(0xFUL, VIRTCHNL_RXD_FLEX_FF0_S) -#define VIRTCHNL_RXD_FLEX_LEN_PBUF_S 0 -#define VIRTCHNL_RXD_FLEX_LEN_PBUF_M \ - ICE_M(0x3FFFUL, VIRTCHNL_RXD_FLEX_LEN_PBUF_S) -#define VIRTCHNL_RXD_FLEX_GEN_S 14 -#define VIRTCHNL_RXD_FLEX_GEN_M BIT_ULL(VIRTCHNL_RXD_FLEX_GEN_S) -#define VIRTCHNL_RXD_FLEX_BUFQ_ID_S 15 -#define VIRTCHNL_RXD_FLEX_BUFQ_ID_M \ - BIT_ULL(VIRTCHNL_RXD_FLEX_BUFQ_ID_S) -#define VIRTCHNL_RXD_FLEX_LEN_HDR_S 0 -#define VIRTCHNL_RXD_FLEX_LEN_HDR_M \ - ICE_M(0x3FFUL, VIRTCHNL_RXD_FLEX_LEN_HDR_S) -#define VIRTCHNL_RXD_FLEX_RSC_S 10 -#define VIRTCHNL_RXD_FLEX_RSC_M BIT_ULL(VIRTCHNL_RXD_FLEX_RSC_S) -#define VIRTCHNL_RXD_FLEX_SPH_S 11 -#define VIRTCHNL_RXD_FLEX_SPH_M BIT_ULL(VIRTCHNL_RXD_FLEX_SPH_S) -#define VIRTCHNL_RXD_FLEX_MISS_S 12 -#define VIRTCHNL_RXD_FLEX_MISS_M \ - BIT_ULL(VIRTCHNL_RXD_FLEX_MISS_S) -#define VIRTCHNL_RXD_FLEX_FF1_S 13 -#define VIRTCHNL_RXD_FLEX_FF1_M ICE_M(0x7UL, VIRTCHNL_RXD_FLEX_FF1_M) - -/* For virtchnl_singleq_base_rx_legacy desc members */ -#define VIRTCHNL_RXD_QW1_LEN_SPH_S 63 -#define VIRTCHNL_RXD_QW1_LEN_SPH_M BIT_ULL(VIRTCHNL_RXD_QW1_LEN_SPH_S) -#define VIRTCHNL_RXD_QW1_LEN_HBUF_S 52 -#define VIRTCHNL_RXD_QW1_LEN_HBUF_M \ - ICE_M(0x7FFULL, VIRTCHNL_RXD_QW1_LEN_HBUF_S) -#define VIRTCHNL_RXD_QW1_LEN_PBUF_S 38 -#define VIRTCHNL_RXD_QW1_LEN_PBUF_M \ - ICE_M(0x3FFFULL, VIRTCHNL_RXD_QW1_LEN_PBUF_S) -#define VIRTCHNL_RXD_QW1_PTYPE_S 30 -#define VIRTCHNL_RXD_QW1_PTYPE_M \ - ICE_M(0xFFULL, VIRTCHNL_RXD_QW1_PTYPE_S) -#define VIRTCHNL_RXD_QW1_ERROR_S 19 -#define VIRTCHNL_RXD_QW1_ERROR_M \ - ICE_M(0xFFUL, VIRTCHNL_RXD_QW1_ERROR_S) -#define VIRTCHNL_RXD_QW1_STATUS_S 0 -#define VIRTCHNL_RXD_QW1_STATUS_M \ - ICE_M(0x7FFFFUL, VIRTCHNL_RXD_QW1_STATUS_S) - -enum virtchnl_rx_flex_desc_status_error_0_qw1_bits { - /* Note: These are predefined bit offsets */ - VIRTCHNL_RX_FLEX_DESC_STATUS0_DD_S = 0, - VIRTCHNL_RX_FLEX_DESC_STATUS0_EOF_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_HBO_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_L3L4P_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_IPE_S, 
- VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_L4E_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S, -}; - -enum virtchnl_rx_flex_desc_status_error_0_qw0_bits { - VIRTCHNL_RX_FLEX_DESC_STATUS0_LPBK_S = 0, - VIRTCHNL_RX_FLEX_DESC_STATUS0_IPV6EXADD_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_RXE_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_CRCP_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_RSS_VALID_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_L2TAG1P_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */ -}; - -enum virtchnl_rx_flex_desc_status_error_1_bits { - /* Note: These are predefined bit offsets */ - VIRTCHNL_RX_FLEX_DESC_STATUS1_RSVD_S = 0, /* 2 bits */ - VIRTCHNL_RX_FLEX_DESC_STATUS1_ATRAEFAIL_S = 2, - VIRTCHNL_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 3, - VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 4, - VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 5, - VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 6, - VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 7, - VIRTCHNL_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */ -}; - -enum virtchnl_rx_base_desc_status_bits { - /* Note: These are predefined bit offsets */ - VIRTCHNL_RX_BASE_DESC_STATUS_DD_S = 0, - VIRTCHNL_RX_BASE_DESC_STATUS_EOF_S = 1, - VIRTCHNL_RX_BASE_DESC_STATUS_L2TAG1P_S = 2, - VIRTCHNL_RX_BASE_DESC_STATUS_L3L4P_S = 3, - VIRTCHNL_RX_BASE_DESC_STATUS_CRCP_S = 4, - VIRTCHNL_RX_BASE_DESC_STATUS_RSVD_S = 5, /* 3 BITS */ - VIRTCHNL_RX_BASE_DESC_STATUS_EXT_UDP_0_S = 8, - VIRTCHNL_RX_BASE_DESC_STATUS_UMBCAST_S = 9, /* 2 BITS */ - VIRTCHNL_RX_BASE_DESC_STATUS_FLM_S = 11, - VIRTCHNL_RX_BASE_DESC_STATUS_FLTSTAT_S = 12, /* 2 BITS */ - VIRTCHNL_RX_BASE_DESC_STATUS_LPBK_S = 14, - VIRTCHNL_RX_BASE_DESC_STATUS_IPV6EXADD_S = 15, - VIRTCHNL_RX_BASE_DESC_STATUS_RSVD1_S = 16, /* 2 BITS */ - VIRTCHNL_RX_BASE_DESC_STATUS_INT_UDP_0_S = 18, - VIRTCHNL_RX_BASE_DESC_STATUS_LAST /* this entry must be last!!! */ -}; - -enum virtchnl_rx_desc_fltstat_values { - VIRTCHNL_RX_DESC_FLTSTAT_NO_DATA = 0, - VIRTCHNL_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ - VIRTCHNL_RX_DESC_FLTSTAT_RSV = 2, - VIRTCHNL_RX_DESC_FLTSTAT_RSS_HASH = 3, -}; - -enum virtchnl_rx_base_desc_error_bits { - /* Note: These are predefined bit offsets */ - VIRTCHNL_RX_BASE_DESC_ERROR_RXE_S = 0, - VIRTCHNL_RX_BASE_DESC_ERROR_ATRAEFAIL_S = 1, - VIRTCHNL_RX_BASE_DESC_ERROR_HBO_S = 2, - VIRTCHNL_RX_BASE_DESC_ERROR_L3L4E_S = 3, /* 3 BITS */ - VIRTCHNL_RX_BASE_DESC_ERROR_IPE_S = 3, - VIRTCHNL_RX_BASE_DESC_ERROR_L4E_S = 4, - VIRTCHNL_RX_BASE_DESC_ERROR_EIPE_S = 5, - VIRTCHNL_RX_BASE_DESC_ERROR_OVERSIZE_S = 6, - VIRTCHNL_RX_BASE_DESC_ERROR_RSVD_S = 7 -}; - -/* Receive Descriptors */ -/* splitq buf - | 16| 0| - ---------------------------------------------------------------- - | RSV | Buffer ID | - ---------------------------------------------------------------- - | Rx packet buffer adresss | - ---------------------------------------------------------------- - | Rx header buffer adresss | - ---------------------------------------------------------------- - | RSV | - ---------------------------------------------------------------- - | 0| - */ -struct virtchnl_splitq_rx_buf_desc { - struct { - __le16 buf_id; /* Buffer Identifier */ - __le16 rsvd0; - __le32 rsvd1; - } qword0; - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - __le64 rsvd2; -}; /* read used with buffer queues*/ - -/* singleq buf - | 0| - ---------------------------------------------------------------- - | Rx packet buffer adresss | - ---------------------------------------------------------------- - | Rx header buffer adresss | - ---------------------------------------------------------------- - | RSV | - ---------------------------------------------------------------- - | RSV | - ---------------------------------------------------------------- - | 0| - */ -struct virtchnl_singleq_rx_buf_desc { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - __le64 rsvd1; - __le64 rsvd2; -}; /* read used with buffer queues*/ - -union virtchnl_rx_buf_desc { - struct virtchnl_singleq_rx_buf_desc read; - struct virtchnl_splitq_rx_buf_desc split_rd; -}; - -/* (0x00) singleq wb(compl) */ -struct virtchnl_singleq_base_rx_desc { - struct { - struct { - __le16 mirroring_status; - __le16 l2tag1; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - __le32 fd_id; /* Flow Director filter id */ - } hi_dword; - } qword0; - struct { - /* status/error/PTYPE/length */ - __le64 status_error_ptype_len; - } qword1; - struct { - __le16 ext_status; /* extended status */ - __le16 rsvd; - __le16 l2tag2_1; - __le16 l2tag2_2; - } qword2; - struct { - __le32 reserved; - __le32 fd_id; - } qword3; -}; /* writeback */ - -/* (0x01) singleq flex compl */ -struct virtchnl_rx_flex_desc { - /* Qword 0 */ - u8 rxdid; /* descriptor builder profile id */ - u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */ - __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */ - __le16 pkt_len; /* [15:14] are reserved */ - __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */ - /* sph=[11:11] */ - /* ff1/ext=[15:12] */ - - /* Qword 1 */ - __le16 status_error0; - __le16 l2tag1; - __le16 flex_meta0; - __le16 flex_meta1; - - /* Qword 2 */ - __le16 status_error1; - u8 flex_flags2; - u8 time_stamp_low; - __le16 l2tag2_1st; - __le16 l2tag2_2nd; - - /* Qword 3 */ - __le16 flex_meta2; - __le16 flex_meta3; - union { - struct { - __le16 flex_meta4; - __le16 flex_meta5; - } flex; - __le32 ts_high; - } flex_ts; -}; - -/* (0x02) */ -struct virtchnl_rx_flex_desc_nic { - /* Qword 0 */ - u8 
rxdid; - u8 mir_id_umb_cast; - __le16 ptype_flexi_flags0; - __le16 pkt_len; - __le16 hdr_len_sph_flex_flags1; - - /* Qword 1 */ - __le16 status_error0; - __le16 l2tag1; - __le32 rss_hash; - - /* Qword 2 */ - __le16 status_error1; - u8 flexi_flags2; - u8 ts_low; - __le16 l2tag2_1st; - __le16 l2tag2_2nd; - - /* Qword 3 */ - __le32 flow_id; - union { - struct { - __le16 rsvd; - __le16 flow_id_ipv6; - } flex; - __le32 ts_high; - } flex_ts; -}; - -/* Rx Flex Descriptor Switch Profile - * RxDID Profile Id 3 - * Flex-field 0: Source Vsi - */ -struct virtchnl_rx_flex_desc_sw { - /* Qword 0 */ - u8 rxdid; - u8 mir_id_umb_cast; - __le16 ptype_flexi_flags0; - __le16 pkt_len; - __le16 hdr_len_sph_flex_flags1; - - /* Qword 1 */ - __le16 status_error0; - __le16 l2tag1; - __le16 src_vsi; /* [10:15] are reserved */ - __le16 flex_md1_rsvd; - - /* Qword 2 */ - __le16 status_error1; - u8 flex_flags2; - u8 ts_low; - __le16 l2tag2_1st; - __le16 l2tag2_2nd; - - /* Qword 3 */ - __le32 rsvd; /* flex words 2-3 are reserved */ - __le32 ts_high; -}; - -/* Rx Flex Descriptor NIC VEB Profile - * RxDID Profile Id 4 - * Flex-field 0: Destination Vsi - */ -struct virtchnl_rx_flex_desc_nic_veb_dbg { - /* Qword 0 */ - u8 rxdid; - u8 mir_id_umb_cast; - __le16 ptype_flexi_flags0; - __le16 pkt_len; - __le16 hdr_len_sph_flex_flags1; - - /* Qword 1 */ - __le16 status_error0; - __le16 l2tag1; - __le16 dst_vsi; /* [0:12]: destination vsi */ - /* 13: vsi valid bit */ - /* [14:15] are reserved */ - __le16 flex_field_1; - - /* Qword 2 */ - __le16 status_error1; - u8 flex_flags2; - u8 ts_low; - __le16 l2tag2_1st; - __le16 l2tag2_2nd; - - /* Qword 3 */ - __le32 rsvd; /* flex words 2-3 are reserved */ - __le32 ts_high; -}; - -/* Rx Flex Descriptor NIC ACL Profile - * RxDID Profile Id 5 - * Flex-field 0: ACL Counter 0 - * Flex-field 1: ACL Counter 1 - * Flex-field 2: ACL Counter 2 - */ -struct virtchnl_rx_flex_desc_nic_acl_dbg { - /* Qword 0 */ - u8 rxdid; - u8 mir_id_umb_cast; - __le16 ptype_flexi_flags0; - __le16 pkt_len; - __le16 hdr_len_sph_flex_flags1; - - /* Qword 1 */ - __le16 status_error0; - __le16 l2tag1; - __le16 acl_ctr0; - __le16 acl_ctr1; - - /* Qword 2 */ - __le16 status_error1; - u8 flex_flags2; - u8 ts_low; - __le16 l2tag2_1st; - __le16 l2tag2_2nd; - - /* Qword 3 */ - __le16 acl_ctr2; - __le16 rsvd; /* flex words 2-3 are reserved */ - __le32 ts_high; -}; - -/* Rx Flex Descriptor NIC Profile - * RxDID Profile Id 6 - * Flex-field 0: RSS hash lower 16-bits - * Flex-field 1: RSS hash upper 16-bits - * Flex-field 2: Flow Id lower 16-bits - * Flex-field 3: Source Vsi - * Flex-field 4: reserved, Vlan id taken from L2Tag - */ -struct virtchnl_rx_flex_desc_nic_2 { - /* Qword 0 */ - u8 rxdid; - u8 mir_id_umb_cast; - __le16 ptype_flexi_flags0; - __le16 pkt_len; - __le16 hdr_len_sph_flex_flags1; - - /* Qword 1 */ - __le16 status_error0; - __le16 l2tag1; - __le32 rss_hash; - - /* Qword 2 */ - __le16 status_error1; - u8 flexi_flags2; - u8 ts_low; - __le16 l2tag2_1st; - __le16 l2tag2_2nd; - - /* Qword 3 */ - __le16 flow_id; - __le16 src_vsi; - union { - struct { - __le16 rsvd; - __le16 flow_id_ipv6; - } flex; - __le32 ts_high; - } flex_ts; -}; - -/* Rx Flex Descriptor Advanced (Split Queue Model) - * RxDID Profile Id 7 - */ -struct virtchnl_rx_flex_desc_adv { - /* Qword 0 */ - u8 rxdid_ucast; /* profile_id=[3:0] */ - /* rsvd=[5:4] */ - /* ucast=[7:6] */ - u8 status_err0_qw0; - __le16 ptype_err_fflags0; /* ptype=[9:0] */ - /* ip_hdr_err=[10:10] */ - /* udp_len_err=[11:11] */ - /* ff0=[15:12] */ - __le16 pktlen_gen_bufq_id; /* 
plen=[13:0] */ - /* gen=[14:14] only in splitq */ - /* bufq_id=[15:15] only in splitq */ - __le16 hdrlen_flags; /* header=[9:0] */ - /* rsc=[10:10] only in splitq */ - /* sph=[11:11] only in splitq */ - /* ext_udp_0=[12:12] */ - /* int_udp_0=[13:13] */ - /* trunc_mirr=[14:14] */ - /* miss_prepend=[15:15] */ - /* Qword 1 */ - u8 status_err0_qw1; - u8 status_err1; - u8 fflags1; - u8 ts_low; - __le16 fmd0; - __le16 fmd1; - /* Qword 2 */ - __le16 fmd2; - u8 fflags2; - u8 hash3; - __le16 fmd3; - __le16 fmd4; - /* Qword 3 */ - __le16 fmd5; - __le16 fmd6; - __le16 fmd7_0; - __le16 fmd7_1; -}; /* writeback */ - -/* Rx Flex Descriptor Advanced (Split Queue Model) NIC Profile - * RxDID Profile Id 8 - * Flex-field 0: BufferID - * Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW) - * Flex-field 2: Hash[15:0] - * Flex-flags 2: Hash[23:16] - * Flex-field 3: L2TAG2 - * Flex-field 5: L2TAG1 - * Flex-field 7: Timestamp (upper 32 bits) - */ -struct virtchnl_rx_flex_desc_adv_nic_3 { - /* Qword 0 */ - u8 rxdid_ucast; /* profile_id=[3:0] */ - /* rsvd=[5:4] */ - /* ucast=[7:6] */ - u8 status_err0_qw0; - __le16 ptype_err_fflags0; /* ptype=[9:0] */ - /* ip_hdr_err=[10:10] */ - /* udp_len_err=[11:11] */ - /* ff0=[15:12] */ - __le16 pktlen_gen_bufq_id; /* plen=[13:0] */ - /* gen=[14:14] only in splitq */ - /* bufq_id=[15:15] only in splitq */ - __le16 hdrlen_flags; /* header=[9:0] */ - /* rsc=[10:10] only in splitq */ - /* sph=[11:11] only in splitq */ - /* ext_udp_0=[12:12] */ - /* int_udp_0=[13:13] */ - /* trunc_mirr=[14:14] */ - /* miss_prepend=[15:15] */ - /* Qword 1 */ - u8 status_err0_qw1; - u8 status_err1; - u8 fflags1; - u8 ts_low; - __le16 buf_id; /* only in splitq */ - union { - __le16 raw_cs; - __le16 l2tag1; - __le16 rscseglen; - } misc; - /* Qword 2 */ - __le16 hash1; - union { - u8 fflags2; - u8 mirrorid; - u8 hash2; - } ff2_mirrid_hash2; - u8 hash3; - __le16 l2tag2; - __le16 fmd4; - /* Qword 3 */ - __le16 l2tag1; - __le16 fmd6; - __le32 ts_high; -}; /* writeback */ - -union virtchnl_rx_desc { - struct virtchnl_singleq_rx_buf_desc read; - struct virtchnl_singleq_base_rx_desc base_wb; - struct virtchnl_rx_flex_desc flex_wb; - struct virtchnl_rx_flex_desc_adv flex_wb_adv; -}; - -#endif /* _VIRTCHNL_LAN_DESC_H_ */
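For reference, the variable-length QoS/BW messages introduced by this update (virtchnl_queues_bw_cfg, virtchnl_dcf_bw_cfg_list, virtchnl_queue_tc_mapping) all follow the same pattern: the structure declares a one-element trailing array, and virtchnl_vc_validate_vf_msg() on the PF side expects the payload length to be sizeof(struct) plus (num - 1) trailing elements. The sketch below is only an illustration of that sizing rule, not part of the patch; the helper name is hypothetical, and how the buffer is actually queued for transmission is outside its scope.

/* Hypothetical VF-side helper (illustration only): allocate a
 * VIRTCHNL_OP_CONFIG_QUEUE_BW payload whose length matches the PF-side
 * check, i.e. sizeof(struct virtchnl_queues_bw_cfg) plus
 * (num_queues - 1) * sizeof(struct virtchnl_queue_bw), since cfg[]
 * already carries one element.
 */
static struct virtchnl_queues_bw_cfg *
example_alloc_queues_bw_cfg(u16 vsi_id, u16 num_queues,
			    const struct virtchnl_queue_bw *bw, u16 *msg_len)
{
	struct virtchnl_queues_bw_cfg *cfg;
	size_t len;
	u16 i;

	if (!num_queues)
		return NULL;

	/* one element is already part of the structure */
	len = sizeof(*cfg) + (num_queues - 1) * sizeof(cfg->cfg[0]);
	cfg = kzalloc(len, GFP_KERNEL);
	if (!cfg)
		return NULL;

	cfg->vsi_id = vsi_id;
	cfg->num_queues = num_queues;
	for (i = 0; i < num_queues; i++)
		cfg->cfg[i] = bw[i];	/* per-queue id, TC and shaper */

	*msg_len = (u16)len;	/* msglen later checked by virtchnl_vc_validate_vf_msg() */
	return cfg;
}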